diff --git a/.github/PULL_REQUEST_TEMPLATE/other.md b/.github/PULL_REQUEST_TEMPLATE/other.md index 3ffd47ed5d..eec7cbf6ee 100644 --- a/.github/PULL_REQUEST_TEMPLATE/other.md +++ b/.github/PULL_REQUEST_TEMPLATE/other.md @@ -1,3 +1,3 @@ - + diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..9d68a39241 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,18 @@ +# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates +version: 2 +updates: +# GitHub Actions +- package-ecosystem: "github-actions" + # Workflow files stored in the + # default location of `.github/workflows` + directory: "/" + schedule: + interval: "weekly" + groups: + all-github-actions: + patterns: [ "*" ] + commit-message: + prefix: ":seedling:" + labels: + - "ok-to-test" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 2d66292133..f6b8370e57 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,4 +1,4 @@ - - + + diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 0000000000..2bbbb33aea --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,39 @@ +name: golangci-lint +on: + pull_request: + types: [opened, edited, synchronize, reopened] + branches: + - main + +permissions: + # Required: allow read access to the content for analysis. + contents: read + # Optional: allow read access to pull request. Use with `only-new-issues` option. + pull-requests: read + # Optional: Allow write access to checks to allow the action to annotate code in the PR. 
+ checks: write + +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + strategy: + matrix: + working-directory: + - "" + - tools/setup-envtest + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: Calculate go version + id: vars + run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT + - name: Set up Go + uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # tag=v6.0.0 + with: + go-version: ${{ steps.vars.outputs.go_version }} + - name: golangci-lint + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # tag=v8.0.0 + with: + version: v2.5.0 + args: --output.text.print-linter-name=true --output.text.colors=true --timeout 10m + working-directory: ${{matrix.working-directory}} diff --git a/.github/workflows/ossf-scorecard.yaml b/.github/workflows/ossf-scorecard.yaml new file mode 100644 index 0000000000..24156f49e0 --- /dev/null +++ b/.github/workflows/ossf-scorecard.yaml @@ -0,0 +1,56 @@ +name: Scorecard supply-chain security +on: + # For Branch-Protection check. Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + # Weekly on Saturdays. + - cron: '30 1 * * 6' + push: + branches: [ "main" ] + +# Declare default permissions as read only. 
+permissions: read-all + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + permissions: + # Needed if using Code scanning alerts + security-events: write + # Needed for GitHub OIDC token if publish_results is true + id-token: write + + steps: + - name: "Checkout code" + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # tag=v2.4.3 + with: + results_file: results.sarif + results_format: sarif + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + publish_results: true + + # Upload the results as artifacts. + - name: "Upload artifact" + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # tag=v4.6.2 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # required for Code scanning alerts + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@83a02f7883b12e0e4e1a146174f5e2292a01e601 # tag=v2.16.4 + with: + sarif_file: results.sarif diff --git a/.github/workflows/pr-dependabot.yaml b/.github/workflows/pr-dependabot.yaml new file mode 100644 index 0000000000..10162e9129 --- /dev/null +++ b/.github/workflows/pr-dependabot.yaml @@ -0,0 +1,38 @@ +name: PR dependabot go modules fix + +# This action runs on PRs opened by dependabot and updates modules. +on: + pull_request: + branches: + - dependabot/** + push: + branches: + - dependabot/** + workflow_dispatch: + +permissions: + contents: write # Allow to update the PR. 
+ +jobs: + build: + name: Build + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: Calculate go version + id: vars + run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT + - name: Set up Go + uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # tag=v6.0.0 + with: + go-version: ${{ steps.vars.outputs.go_version }} + - name: Update all modules + run: make modules + - uses: EndBug/add-and-commit@a94899bca583c204427a224a7af87c02f9b325d5 # tag=v9.1.4 + name: Commit changes + with: + author_name: dependabot[bot] + author_email: 49699333+dependabot[bot]@users.noreply.github.com + default_author: github_actor + message: 'Update generated code' diff --git a/.github/workflows/pr-gh-workflow-approve.yaml b/.github/workflows/pr-gh-workflow-approve.yaml new file mode 100644 index 0000000000..28be4dac71 --- /dev/null +++ b/.github/workflows/pr-gh-workflow-approve.yaml @@ -0,0 +1,42 @@ +name: PR approve GH Workflows + +on: + pull_request_target: + types: + - edited + - labeled + - reopened + - synchronize + +permissions: {} + +jobs: + approve: + name: Approve ok-to-test + if: contains(github.event.pull_request.labels.*.name, 'ok-to-test') + runs-on: ubuntu-latest + permissions: + actions: write + steps: + - name: Update PR + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + continue-on-error: true + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const result = await github.rest.actions.listWorkflowRunsForRepo({ + owner: context.repo.owner, + repo: context.repo.repo, + event: "pull_request", + status: "action_required", + head_sha: context.payload.pull_request.head.sha, + per_page: 100 + }); + + for (var run of result.data.workflow_runs) { + await github.rest.actions.approveWorkflowRun({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: run.id + }); + } diff --git 
a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 0000000000..6e3b7734e7 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,35 @@ +name: Upload binaries to release + +on: + push: + # Sequence of patterns matched against refs/tags + tags: + - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10 + +permissions: + contents: write + +jobs: + build: + name: Upload binaries to release + runs-on: ubuntu-latest + steps: + - name: Set env + run: echo "RELEASE_TAG=${GITHUB_REF:10}" >> $GITHUB_ENV + - name: Check out code + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + - name: Calculate go version + id: vars + run: echo "go_version=$(make go-version)" >> $GITHUB_OUTPUT + - name: Set up Go + uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # tag=v6.0.0 + with: + go-version: ${{ steps.vars.outputs.go_version }} + - name: Generate release binaries + run: | + make release + - name: Release + uses: softprops/action-gh-release@62c96d0c4e8a889135c1f3a25910db8dbe0e85f7 # tag=v2.3.4 + with: + draft: false + files: tools/setup-envtest/out/* diff --git a/.github/workflows/verify.yml b/.github/workflows/verify.yml new file mode 100644 index 0000000000..2168d72516 --- /dev/null +++ b/.github/workflows/verify.yml @@ -0,0 +1,18 @@ +name: PR title verifier + +on: + pull_request_target: + types: [opened, edited, synchronize, reopened] + +jobs: + verify: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # tag=v5.0.0 + + - name: Check if PR title is valid + env: + PR_TITLE: ${{ github.event.pull_request.title }} + run: | + ./hack/verify-pr-title.sh "${PR_TITLE}" diff --git a/.gitignore b/.gitignore index c25057f685..2ddc5a8b87 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,15 @@ *.swp *.swo *~ + +# Vscode files +.vscode + +# Tools binaries. 
+hack/tools/bin + +# Release artifacts +tools/setup-envtest/out + +junit-report.xml +/artifacts diff --git a/.golangci.yml b/.golangci.yml index a418256a4a..85701c88a8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,196 @@ -linters-settings: - lll: - line-length: 170 - dupl: - threshold: 400 +version: "2" +run: + go: "1.24" + timeout: 10m + allow-parallel-runners: true +linters: + default: none + enable: + - asasalint + - asciicheck + - bidichk + - bodyclose + - copyloopvar + - dogsled + - dupl + - errcheck + - errchkjson + - errorlint + - exhaustive + - forbidigo + - ginkgolinter + - goconst + - gocritic + - gocyclo + - godoclint + - goprintffuncname + - govet + - importas + - ineffassign + - iotamixing + - makezero + - misspell + - nakedret + - nilerr + - nolintlint + - prealloc + - revive + - staticcheck + - tagliatelle + - unconvert + - unparam + - unused + - whitespace + settings: + forbidigo: + forbid: + - pattern: context.Background + msg: Use ginkgos SpecContext or go testings t.Context instead + - pattern: context.TODO + msg: Use ginkgos SpecContext or go testings t.Context instead + govet: + disable: + - fieldalignment + - shadow + enable-all: true + importas: + alias: + - pkg: k8s.io/api/core/v1 + alias: corev1 + - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 + alias: apiextensionsv1 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 + - pkg: k8s.io/apimachinery/pkg/api/errors + alias: apierrors + - pkg: k8s.io/apimachinery/pkg/util/errors + alias: kerrors + - pkg: sigs.k8s.io/controller-runtime + alias: ctrl + no-unaliased: true + revive: + rules: + # The following rules are recommended https://github.com/mgechev/revive#recommended-configuration + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + - name: if-return + - name: increment-decrement + - name: 
var-naming + - name: var-declaration + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: superfluous-else + - name: unreachable-code + - name: redefines-builtin-id + # + # Rules in addition to the recommended configuration above. + # + - name: bool-literal-in-expr + - name: constant-logical-expr + exclusions: + generated: strict + paths: + - zz_generated.*\.go$ + - .*conversion.*\.go$ + rules: + - linters: + - forbidigo + path-except: _test\.go + - linters: + - gosec + text: 'G108: Profiling endpoint is automatically exposed on /debug/pprof' + - linters: + - revive + text: 'exported: exported method .*\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported' + - linters: + - errcheck + text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + - linters: + - staticcheck + text: 'SA1019: .*The component config package has been deprecated and will be removed in a future release.' + # With Go 1.16, the new embed directive can be used with an un-named import, + # revive (previously, golint) only allows these to be imported in a main.go, which wouldn't work for us. + # This directive allows the embed package to be imported with an underscore everywhere. + - linters: + - revive + source: _ "embed" + # Exclude some packages or code to require comments, for example test code, or fake clients. + - linters: + - revive + text: exported (method|function|type|const) (.+) should have comment or be unexported + source: (func|type).*Fake.* + - linters: + - revive + path: fake_\.go + text: exported (method|function|type|const) (.+) should have comment or be unexported + # Disable unparam "always receives" which might not be really + # useful when building libraries. 
+ - linters: + - unparam + text: always receives + # Dot imports for gomega and ginkgo are allowed + # within test files. + - path: _test\.go + text: should not use dot imports + - path: _test\.go + text: cyclomatic complexity + - path: _test\.go + text: 'G107: Potential HTTP request made with variable url' + # Append should be able to assign to a different var/slice. + - linters: + - gocritic + text: 'appendAssign: append result not assigned to the same slice' + - linters: + - gocritic + text: 'singleCaseSwitch: should rewrite switch statement to if statement' + # It considers all file access to a filename that comes from a variable problematic, + # which is naiv at best. + - linters: + - gosec + text: 'G304: Potential file inclusion via variable' + - linters: + - dupl + path: _test\.go + - linters: + - revive + path: .*/internal/.* + - linters: + - unused + # Seems to incorrectly trigger on the two implementations that are only + # used through an interface and not directly..? + # Likely same issue as https://github.com/dominikh/go-tools/issues/1616 + path: pkg/controller/priorityqueue/metrics\.go + # The following are being worked on to remove their exclusion. This list should be reduced or go away all together over time. + # If it is decided they will not be addressed they should be moved above this comment. 
+ - path: (.+)\.go$ + text: Subprocess launch(ed with variable|ing should be audited) + - linters: + - gosec + path: (.+)\.go$ + text: (G204|G104|G307) + - linters: + - staticcheck + path: (.+)\.go$ + text: (ST1000|QF1008) +issues: + max-issues-per-linter: 0 + max-same-issues: 0 +formatters: + enable: + - gofmt + - goimports + exclusions: + generated: strict + paths: + - zz_generated.*\.go$ + - .*conversion.*\.go$ diff --git a/.gomodcheck.yaml b/.gomodcheck.yaml new file mode 100644 index 0000000000..3608de331d --- /dev/null +++ b/.gomodcheck.yaml @@ -0,0 +1,17 @@ +upstreamRefs: + - k8s.io/api + - k8s.io/apiextensions-apiserver + - k8s.io/apimachinery + - k8s.io/apiserver + - k8s.io/client-go + - k8s.io/component-base + # k8s.io/klog/v2 -> conflicts with k/k deps + # k8s.io/utils -> conflicts with k/k deps + +excludedModules: + # --- test dependencies: + - github.com/onsi/ginkgo/v2 + - github.com/onsi/gomega + + # --- We want a newer version with generics support for this + - github.com/google/btree diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 1562e222fa..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,31 +0,0 @@ -language: go - -os: - - linux - - osx - -cache: - directories: - - $HOME/.cache/go-build - - $GOPATH/pkg/mod - -go: -- "1.12" - -git: - depth: 3 - -go_import_path: sigs.k8s.io/controller-runtime - -install: -- go get -u github.com/golang/dep/cmd/dep -#- go get -u golang.org/x/lint/golint -- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.15.0 - -script: -- GO111MODULE=on TRACE=1 ./hack/check-everything.sh - - -# TBD. Suppressing for now. -notifications: - email: false diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index feecd43eac..2c0ea1f667 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -16,14 +16,4 @@ Please see https://git.k8s.io/community/CLA.md for more info ## Test locally -1. 
Setup tools - ```bash - $ go get -u github.com/golang/dep/cmd/dep - $ go get -u gopkg.in/alecthomas/gometalinter.v2 - $ gometalinter.v2 --install # if can't load package, refer: https://github.com/alecthomas/gometalinter/issues/404 - ``` -1. Test - ```bash - TRACE=1 ./hack/check-everything.sh - ``` - +Run the command `make test` to test the changes locally. diff --git a/FAQ.md b/FAQ.md index ab926e7e3b..9c36c8112e 100644 --- a/FAQ.md +++ b/FAQ.md @@ -4,13 +4,13 @@ **A**: Each controller should only reconcile one object type. Other affected objects should be mapped to a single type of root object, using -the `EnqueueRequestForOwner` or `EnqueueRequestsFromMapFunc` event -handlers, and potentially indicies. Then, your Reconcile method should +the `handler.EnqueueRequestForOwner` or `handler.EnqueueRequestsFromMapFunc` event +handlers, and potentially indices. Then, your Reconcile method should attempt to reconcile *all* state for that given root objects. ### Q: How do I have different logic in my reconciler for different types of events (e.g. create, update, delete)? -**A**: You should not. Reconcile functions should be idempotent, and +**A**: You should not. Reconcile functions should be idempotent, and should always reconcile state by reading all the state it needs, then writing updates. This allows your reconciler to correctly respond to generic events, adjust to skipped or coalesced events, and easily deal @@ -30,13 +30,13 @@ on your situation. take this approach: the StatefulSet controller appends a specific number to each pod that it creates, while the Deployment controller hashes the pod template spec and appends that. - + - In the few cases when you cannot take advantage of deterministic names (e.g. when using generateName), it may be useful in to track which actions you took, and assume that they need to be repeated if they don't occur after a given time (e.g. using a requeue result). This is what the ReplicaSet controller does. 
- + In general, write your controller with the assumption that information will eventually be correct, but may be slightly out of date. Make sure that your reconcile function enforces the entire state of the world each @@ -48,17 +48,17 @@ generally cover most circumstances. ### Q: Where's the fake client? How do I use it? **A**: The fake client -[exists](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client/fake), +[exists](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake), but we generally recommend using -[envtest.Environment](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/envtest#Environment) +[envtest.Environment](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#Environment) to test against a real API server. In our experience, tests using fake clients gradually re-implement poorly-written impressions of a real API server, which leads to hard-to-maintain, complex test code. -### Q: How should I write tests? Any suggestions for getting started? +### Q: How should I write tests? Any suggestions for getting started? - Use the aforementioned - [envtest.Environment](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/envtest#Environment) + [envtest.Environment](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#Environment) to spin up a real API server instead of trying to mock one out. - Structure your tests to check that the state of the world is as you @@ -77,5 +77,5 @@ mapping between Go types and group-version-kinds in Kubernetes. In general, your application should have its own Scheme containing the types from the API groups that it needs (be they Kubernetes types or your own). See the [scheme builder -docs](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/scheme) for +docs](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/scheme) for more information. 
diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index dbd73bbbc9..0000000000 --- a/Gopkg.lock +++ /dev/null @@ -1,985 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - digest = "1:5c3894b2aa4d6bead0ceeea6831b305d62879c871780e7b76296ded1b004bc57" - name = "cloud.google.com/go" - packages = ["compute/metadata"] - pruneopts = "UT" - revision = "64a2037ec6be8a4b0c1d1f706ed35b428b989239" - version = "v0.26.0" - -[[projects]] - digest = "1:327b9226c8ea5f1cd9952ba859bb7c335cab40fd8781c4a790ef259b0c5fbc40" - name = "github.com/Azure/go-autorest" - packages = [ - "autorest", - "autorest/adal", - "autorest/azure", - "autorest/date", - "logger", - "version", - ] - pruneopts = "UT" - revision = "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf" - version = "v10.15.2" - -[[projects]] - digest = "1:d1665c44bd5db19aaee18d1b6233c99b0b9a986e8bccb24ef54747547a48027f" - name = "github.com/PuerkitoBio/purell" - packages = ["."] - pruneopts = "UT" - revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:c739832d67eb1e9cc478a19cc1a1ccd78df0397bf8a32978b759152e205f644b" - name = "github.com/PuerkitoBio/urlesc" - packages = ["."] - pruneopts = "UT" - revision = "de5bf2ad457846296e2031421a34e2568e304e35" - -[[projects]] - digest = "1:464aef731a5f82ded547c62e249a2e9ec59fbbc9ddab53cda7b9857852630a61" - name = "github.com/appscode/jsonpatch" - packages = ["."] - pruneopts = "UT" - revision = "7c0e3b262f30165a8ec3d0b4c6059fd92703bfb2" - version = "1.0.0" - -[[projects]] - branch = "master" - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "UT" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - digest = 
"1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - pruneopts = "UT" - revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" - version = "v3.2.0" - -[[projects]] - digest = "1:899234af23e5793c34e06fd397f86ba33af5307b959b6a7afd19b63db065a9d7" - name = "github.com/emicklei/go-restful" - packages = [ - ".", - "log", - ] - pruneopts = "UT" - revision = "3eb9738c1697594ea6e71a7156a9bb32ed216cf0" - version = "v2.8.0" - -[[projects]] - digest = "1:f1f2bd73c025d24c3b93abf6364bccb802cf2fdedaa44360804c67800e8fab8d" - name = "github.com/evanphx/json-patch" - packages = ["."] - pruneopts = "UT" - revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5" - version = "v4.1.0" - -[[projects]] - branch = "master" - digest = "1:99f994f4c2f71aeea1810d6061f3b52f3d2ee0084cfcb1ebbf40e00a0d0666f6" - name = "github.com/go-logr/logr" - packages = [ - ".", - "testing", - ] - pruneopts = "UT" - revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" - -[[projects]] - digest = "1:ce43ad4015e7cdad3f0e8f2c8339439dd4470859a828d2a6988b0f713699e94a" - name = "github.com/go-logr/zapr" - packages = ["."] - pruneopts = "UT" - revision = "7536572e8d55209135cd5e7ccf7fce43dca217ab" - -[[projects]] - digest = "1:2997679181d901ac8aaf4330d11138ecf3974c6d3334995ff36f20cbd597daf8" - name = "github.com/go-openapi/jsonpointer" - packages = ["."] - pruneopts = "UT" - revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2" - version = "0.16.0" - -[[projects]] - digest = "1:1ae3f233d75a731b164ca9feafd8ed646cbedf1784095876ed6988ce8aa88b1f" - name = "github.com/go-openapi/jsonreference" - 
packages = ["."] - pruneopts = "UT" - revision = "3fb327e6747da3043567ee86abd02bb6376b6be2" - version = "0.16.0" - -[[projects]] - digest = "1:3e5bdbd2a071c72c778c28fd7b5dfde95cdfbcef412f364377e031877205e418" - name = "github.com/go-openapi/spec" - packages = ["."] - pruneopts = "UT" - revision = "384415f06ee238aae1df5caad877de6ceac3a5c4" - version = "0.16.0" - -[[projects]] - digest = "1:c80984d4a9bb79539743aff5af91b595d84f513700150b0ed73c1697d1200d54" - name = "github.com/go-openapi/swag" - packages = ["."] - pruneopts = "UT" - revision = "becd2f08beafcca035645a8a101e0e3e18140458" - version = "0.16.0" - -[[projects]] - digest = "1:34e709f36fd4f868fb00dbaf8a6cab4c1ae685832d392874ba9d7c5dec2429d1" - name = "github.com/gogo/protobuf" - packages = [ - "proto", - "sortkeys", - ] - pruneopts = "UT" - revision = "636bf0302bc95575d69441b25a2603156ffdddf1" - version = "v1.1.1" - -[[projects]] - branch = "master" - digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8" - name = "github.com/golang/groupcache" - packages = ["lru"] - pruneopts = "UT" - revision = "24b0969c4cb722950103eed87108c8d291a8df00" - -[[projects]] - digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" - version = "v1.2.0" - -[[projects]] - branch = "master" - digest = "1:3ee90c0d94da31b442dde97c99635aaafec68d0b8a3c12ee2075c6bdabeec6bb" - name = "github.com/google/gofuzz" - packages = ["."] - pruneopts = "UT" - revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" - -[[projects]] - digest = "1:65c4414eeb350c47b8de71110150d0ea8a281835b1f386eacaa3ad7325929c21" - name = "github.com/googleapis/gnostic" - packages = [ - "OpenAPIv2", - "compiler", - 
"extensions", - ] - pruneopts = "UT" - revision = "7c663266750e7d82587642f65e60bc4083f1f84e" - version = "v0.2.0" - -[[projects]] - branch = "master" - digest = "1:349090dc3e864fe83388be8e8949df6e22607c477644abcc3b530d1b2c7bb714" - name = "github.com/gophercloud/gophercloud" - packages = [ - ".", - "openstack", - "openstack/identity/v2/tenants", - "openstack/identity/v2/tokens", - "openstack/identity/v3/tokens", - "openstack/utils", - "pagination", - ] - pruneopts = "UT" - revision = "199db9a3a2bfa55802ab074ab8fed67fce2a2791" - -[[projects]] - branch = "master" - digest = "1:cf296baa185baae04a9a7004efee8511d08e2f5f51d4cbe5375da89722d681db" - name = "github.com/hashicorp/golang-lru" - packages = [ - ".", - "simplelru", - ] - pruneopts = "UT" - revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" - -[[projects]] - digest = "1:a1038ef593beb4771c8f0f9c26e8b00410acd800af5c6864651d9bf160ea1813" - name = "github.com/hpcloud/tail" - packages = [ - ".", - "ratelimiter", - "util", - "watch", - "winfile", - ] - pruneopts = "UT" - revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5" - version = "v1.0.0" - -[[projects]] - digest = "1:8eb1de8112c9924d59bf1d3e5c26f5eaa2bfc2a5fcbb92dc1c2e4546d695f277" - name = "github.com/imdario/mergo" - packages = ["."] - pruneopts = "UT" - revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4" - version = "v0.3.6" - -[[projects]] - digest = "1:3e551bbb3a7c0ab2a2bf4660e7fcad16db089fdcfbb44b0199e62838038623ea" - name = "github.com/json-iterator/go" - packages = ["."] - pruneopts = "UT" - revision = "1624edc4454b8682399def8740d46db5e4362ba4" - version = "1.1.5" - -[[projects]] - branch = "master" - digest = "1:84a5a2b67486d5d67060ac393aa255d05d24ed5ee41daecd5635ec22657b6492" - name = "github.com/mailru/easyjson" - packages = [ - "buffer", - "jlexer", - "jwriter", - ] - pruneopts = "UT" - revision = "60711f1a8329503b04e1c88535f419d0bb440bff" - -[[projects]] - 
digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "UT" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563" - name = "github.com/modern-go/concurrent" - packages = ["."] - pruneopts = "UT" - revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" - version = "1.0.3" - -[[projects]] - digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855" - name = "github.com/modern-go/reflect2" - packages = ["."] - pruneopts = "UT" - revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" - version = "1.0.1" - -[[projects]] - digest = "1:42e29deef12327a69123b9cb2cb45fee4af5c12c2a23c6e477338279a052703f" - name = "github.com/onsi/ginkgo" - packages = [ - ".", - "config", - "internal/codelocation", - "internal/containernode", - "internal/failer", - "internal/leafnodes", - "internal/remote", - "internal/spec", - "internal/spec_iterator", - "internal/specrunner", - "internal/suite", - "internal/testingtproxy", - "internal/writer", - "reporters", - "reporters/stenographer", - "reporters/stenographer/support/go-colorable", - "reporters/stenographer/support/go-isatty", - "types", - ] - pruneopts = "UT" - revision = "3774a09d95489ccaa16032e0770d08ea77ba6184" - version = "v1.6.0" - -[[projects]] - digest = "1:58418daa018f162c520512737c19ecd42582524b0f900f2eee7c212ce55cf0da" - name = "github.com/onsi/gomega" - packages = [ - ".", - "format", - "gbytes", - "gexec", - "internal/assertion", - "internal/asyncassertion", - "internal/oraclematcher", - "internal/testingtsupport", - "matchers", - "matchers/support/goraph/bipartitegraph", - "matchers/support/goraph/edge", - "matchers/support/goraph/node", - "matchers/support/goraph/util", - "types", - ] - pruneopts = "UT" 
- revision = "b6ea1ea48f981d0f615a154a45eabb9dd466556d" - version = "v1.4.1" - -[[projects]] - digest = "1:361de06aa7ae272616cbe71c3994a654cc6316324e30998e650f7765b20c5b33" - name = "github.com/pborman/uuid" - packages = ["."] - pruneopts = "UT" - revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53" - version = "v1.1" - -[[projects]] - digest = "1:20a8a18488bdef471795af0413d9a3c9c35dc24ca93342329b884ba3c15cbaab" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/internal", - "prometheus/promhttp", - ] - pruneopts = "UT" - revision = "1cafe34db7fdec6022e17e00e1c1ea501022f3e4" - version = "v0.9.0" - -[[projects]] - branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "UT" - revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" - -[[projects]] - branch = "master" - digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "UT" - revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" - -[[projects]] - branch = "master" - digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs", - ] - pruneopts = "UT" - revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" - -[[projects]] - digest = "1:dab83a1bbc7ad3d7a6ba1a1cc1760f25ac38cdf7d96a5cdd55cd915a4f5ceaf9" - name = "github.com/spf13/pflag" - packages = ["."] - pruneopts = "UT" - revision = "9a97c102cda95a86cec2345a6f09f55a939babf5" - version = "v1.0.2" - -[[projects]] - digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d" - name = "go.uber.org/atomic" - packages = ["."] - pruneopts = 
"UT" - revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" - version = "v1.3.2" - -[[projects]] - digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" - name = "go.uber.org/multierr" - packages = ["."] - pruneopts = "UT" - revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" - version = "v1.1.0" - -[[projects]] - digest = "1:c52caf7bd44f92e54627a31b85baf06a68333a196b3d8d241480a774733dcf8b" - name = "go.uber.org/zap" - packages = [ - ".", - "buffer", - "internal/bufferpool", - "internal/color", - "internal/exit", - "zapcore", - ] - pruneopts = "UT" - revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982" - version = "v1.9.1" - -[[projects]] - branch = "master" - digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8" - name = "golang.org/x/crypto" - packages = ["ssh/terminal"] - pruneopts = "UT" - revision = "614d502a4dac94afa3a6ce146bd1736da82514c6" - -[[projects]] - branch = "master" - digest = "1:ff58bc124488348ba2ca02f62de899d07ba3e7dce51da02557bfc9a8052608db" - name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - "html", - "html/atom", - "html/charset", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - ] - pruneopts = "UT" - revision = "922f4815f713f213882e8ef45e0d315b164d705c" - -[[projects]] - branch = "master" - digest = "1:f645667d687fc8bf228865a2c5455824ef05bad08841e673673ef2bb89ac5b90" - name = "golang.org/x/oauth2" - packages = [ - ".", - "google", - "internal", - "jws", - "jwt", - ] - pruneopts = "UT" - revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9" - -[[projects]] - branch = "master" - digest = "1:7710ea76bd73d3154e86eaf2db8b36a6dffbd6114e4e37b516f1d232c03ddd8d" - name = "golang.org/x/sys" - packages = [ - "unix", - "windows", - ] - pruneopts = "UT" - revision = "11551d06cbcc94edc80a0facaccbda56473c19c1" - -[[projects]] - digest = "1:bb8277a2ca2bcad6ff7f413b939375924099be908cedd1314baa21ecd08df477" - name = "golang.org/x/text" - packages = [ - "collate", - 
"collate/build", - "encoding", - "encoding/charmap", - "encoding/htmlindex", - "encoding/internal", - "encoding/internal/identifier", - "encoding/japanese", - "encoding/korean", - "encoding/simplifiedchinese", - "encoding/traditionalchinese", - "encoding/unicode", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "internal/utf8internal", - "language", - "runes", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - "width", - ] - pruneopts = "UT" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - digest = "1:c9e7a4b4d47c0ed205d257648b0e5b0440880cb728506e318f8ac7cd36270bc4" - name = "golang.org/x/time" - packages = ["rate"] - pruneopts = "UT" - revision = "fbb02b2291d28baffd63558aa44b4b56f178d650" - -[[projects]] - digest = "1:c8907869850adaa8bd7631887948d0684f3787d0912f1c01ab72581a6c34432e" - name = "google.golang.org/appengine" - packages = [ - ".", - "internal", - "internal/app_identity", - "internal/base", - "internal/datastore", - "internal/log", - "internal/modules", - "internal/remote_api", - "internal/urlfetch", - "urlfetch", - ] - pruneopts = "UT" - revision = "b1f26356af11148e710935ed1ac8a7f5702c7612" - version = "v1.1.0" - -[[projects]] - digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" - name = "gopkg.in/fsnotify.v1" - packages = ["."] - pruneopts = "UT" - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - source = "https://github.com/fsnotify/fsnotify.git" - version = "v1.4.7" - -[[projects]] - digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" - name = "gopkg.in/inf.v0" - packages = ["."] - pruneopts = "UT" - revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" - version = "v0.9.1" - -[[projects]] - branch = "v1" - digest = "1:0caa92e17bc0b65a98c63e5bc76a9e844cd5e56493f8fdbb28fad101a16254d9" - name = 
"gopkg.in/tomb.v1" - packages = ["."] - pruneopts = "UT" - revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" - -[[projects]] - digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "UT" - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" - -[[projects]] - digest = "1:baf5a7da864b8f493ff372a9486c2d6a7fae8363699af2dc0ef6dba9a869928f" - name = "k8s.io/api" - packages = [ - "admission/v1beta1", - "admissionregistration/v1beta1", - "apps/v1", - "apps/v1beta1", - "apps/v1beta2", - "auditregistration/v1alpha1", - "authentication/v1", - "authentication/v1beta1", - "authorization/v1", - "authorization/v1beta1", - "autoscaling/v1", - "autoscaling/v2beta1", - "autoscaling/v2beta2", - "batch/v1", - "batch/v1beta1", - "batch/v2alpha1", - "certificates/v1beta1", - "coordination/v1", - "coordination/v1beta1", - "core/v1", - "events/v1beta1", - "extensions/v1beta1", - "networking/v1", - "networking/v1beta1", - "node/v1alpha1", - "node/v1beta1", - "policy/v1beta1", - "rbac/v1", - "rbac/v1alpha1", - "rbac/v1beta1", - "scheduling/v1", - "scheduling/v1alpha1", - "scheduling/v1beta1", - "settings/v1alpha1", - "storage/v1", - "storage/v1alpha1", - "storage/v1beta1", - ] - pruneopts = "UT" - revision = "6e4e0e4f393bf5e8bbff570acd13217aa5a770cd" - version = "kubernetes-1.14.1" - -[[projects]] - digest = "1:65226935ef0a84716e327aa46c5a11cf2158ca6d0c79b379202d0b2c2be0e9bc" - name = "k8s.io/apiextensions-apiserver" - packages = [ - "pkg/apis/apiextensions", - "pkg/apis/apiextensions/v1beta1", - "pkg/client/clientset/clientset", - "pkg/client/clientset/clientset/scheme", - "pkg/client/clientset/clientset/typed/apiextensions/v1beta1", - ] - pruneopts = "UT" - revision = "727a075fdec8319bf095330e344b3ccc668abc73" - version = "kubernetes-1.14.1" - -[[projects]] - digest = "1:2616411ee46c05b1488f127c171e9204695d15d1e654fe79ea2d204f96f28070" - name = "k8s.io/apimachinery" - packages = 
[ - "pkg/api/errors", - "pkg/api/meta", - "pkg/api/resource", - "pkg/apis/meta/internalversion", - "pkg/apis/meta/v1", - "pkg/apis/meta/v1/unstructured", - "pkg/apis/meta/v1beta1", - "pkg/conversion", - "pkg/conversion/queryparams", - "pkg/fields", - "pkg/labels", - "pkg/runtime", - "pkg/runtime/schema", - "pkg/runtime/serializer", - "pkg/runtime/serializer/json", - "pkg/runtime/serializer/protobuf", - "pkg/runtime/serializer/recognizer", - "pkg/runtime/serializer/streaming", - "pkg/runtime/serializer/versioning", - "pkg/selection", - "pkg/types", - "pkg/util/cache", - "pkg/util/clock", - "pkg/util/diff", - "pkg/util/errors", - "pkg/util/framer", - "pkg/util/intstr", - "pkg/util/json", - "pkg/util/mergepatch", - "pkg/util/naming", - "pkg/util/net", - "pkg/util/runtime", - "pkg/util/sets", - "pkg/util/strategicpatch", - "pkg/util/uuid", - "pkg/util/validation", - "pkg/util/validation/field", - "pkg/util/wait", - "pkg/util/yaml", - "pkg/version", - "pkg/watch", - "third_party/forked/golang/json", - "third_party/forked/golang/reflect", - ] - pruneopts = "UT" - revision = "6a84e37a896db9780c75367af8d2ed2bb944022e" - version = "kubernetes-1.14.1" - -[[projects]] - digest = "1:00f288473b016529887e152dad2465482cad64b233298d58f1526385505b27db" - name = "k8s.io/client-go" - packages = [ - "discovery", - "dynamic", - "informers", - "informers/admissionregistration", - "informers/admissionregistration/v1beta1", - "informers/apps", - "informers/apps/v1", - "informers/apps/v1beta1", - "informers/apps/v1beta2", - "informers/auditregistration", - "informers/auditregistration/v1alpha1", - "informers/autoscaling", - "informers/autoscaling/v1", - "informers/autoscaling/v2beta1", - "informers/autoscaling/v2beta2", - "informers/batch", - "informers/batch/v1", - "informers/batch/v1beta1", - "informers/batch/v2alpha1", - "informers/certificates", - "informers/certificates/v1beta1", - "informers/coordination", - "informers/coordination/v1", - "informers/coordination/v1beta1", - 
"informers/core", - "informers/core/v1", - "informers/events", - "informers/events/v1beta1", - "informers/extensions", - "informers/extensions/v1beta1", - "informers/internalinterfaces", - "informers/networking", - "informers/networking/v1", - "informers/networking/v1beta1", - "informers/node", - "informers/node/v1alpha1", - "informers/node/v1beta1", - "informers/policy", - "informers/policy/v1beta1", - "informers/rbac", - "informers/rbac/v1", - "informers/rbac/v1alpha1", - "informers/rbac/v1beta1", - "informers/scheduling", - "informers/scheduling/v1", - "informers/scheduling/v1alpha1", - "informers/scheduling/v1beta1", - "informers/settings", - "informers/settings/v1alpha1", - "informers/storage", - "informers/storage/v1", - "informers/storage/v1alpha1", - "informers/storage/v1beta1", - "kubernetes", - "kubernetes/scheme", - "kubernetes/typed/admissionregistration/v1beta1", - "kubernetes/typed/apps/v1", - "kubernetes/typed/apps/v1beta1", - "kubernetes/typed/apps/v1beta2", - "kubernetes/typed/auditregistration/v1alpha1", - "kubernetes/typed/authentication/v1", - "kubernetes/typed/authentication/v1beta1", - "kubernetes/typed/authorization/v1", - "kubernetes/typed/authorization/v1beta1", - "kubernetes/typed/autoscaling/v1", - "kubernetes/typed/autoscaling/v2beta1", - "kubernetes/typed/autoscaling/v2beta2", - "kubernetes/typed/batch/v1", - "kubernetes/typed/batch/v1beta1", - "kubernetes/typed/batch/v2alpha1", - "kubernetes/typed/certificates/v1beta1", - "kubernetes/typed/coordination/v1", - "kubernetes/typed/coordination/v1beta1", - "kubernetes/typed/core/v1", - "kubernetes/typed/events/v1beta1", - "kubernetes/typed/extensions/v1beta1", - "kubernetes/typed/networking/v1", - "kubernetes/typed/networking/v1beta1", - "kubernetes/typed/node/v1alpha1", - "kubernetes/typed/node/v1beta1", - "kubernetes/typed/policy/v1beta1", - "kubernetes/typed/rbac/v1", - "kubernetes/typed/rbac/v1alpha1", - "kubernetes/typed/rbac/v1beta1", - "kubernetes/typed/scheduling/v1", - 
"kubernetes/typed/scheduling/v1alpha1", - "kubernetes/typed/scheduling/v1beta1", - "kubernetes/typed/settings/v1alpha1", - "kubernetes/typed/storage/v1", - "kubernetes/typed/storage/v1alpha1", - "kubernetes/typed/storage/v1beta1", - "listers/admissionregistration/v1beta1", - "listers/apps/v1", - "listers/apps/v1beta1", - "listers/apps/v1beta2", - "listers/auditregistration/v1alpha1", - "listers/autoscaling/v1", - "listers/autoscaling/v2beta1", - "listers/autoscaling/v2beta2", - "listers/batch/v1", - "listers/batch/v1beta1", - "listers/batch/v2alpha1", - "listers/certificates/v1beta1", - "listers/coordination/v1", - "listers/coordination/v1beta1", - "listers/core/v1", - "listers/events/v1beta1", - "listers/extensions/v1beta1", - "listers/networking/v1", - "listers/networking/v1beta1", - "listers/node/v1alpha1", - "listers/node/v1beta1", - "listers/policy/v1beta1", - "listers/rbac/v1", - "listers/rbac/v1alpha1", - "listers/rbac/v1beta1", - "listers/scheduling/v1", - "listers/scheduling/v1alpha1", - "listers/scheduling/v1beta1", - "listers/settings/v1alpha1", - "listers/storage/v1", - "listers/storage/v1alpha1", - "listers/storage/v1beta1", - "pkg/apis/clientauthentication", - "pkg/apis/clientauthentication/v1alpha1", - "pkg/apis/clientauthentication/v1beta1", - "pkg/version", - "plugin/pkg/client/auth", - "plugin/pkg/client/auth/azure", - "plugin/pkg/client/auth/exec", - "plugin/pkg/client/auth/gcp", - "plugin/pkg/client/auth/oidc", - "plugin/pkg/client/auth/openstack", - "rest", - "rest/watch", - "restmapper", - "testing", - "third_party/forked/golang/template", - "tools/auth", - "tools/cache", - "tools/clientcmd", - "tools/clientcmd/api", - "tools/clientcmd/api/latest", - "tools/clientcmd/api/v1", - "tools/leaderelection", - "tools/leaderelection/resourcelock", - "tools/metrics", - "tools/pager", - "tools/record", - "tools/record/util", - "tools/reference", - "transport", - "util/cert", - "util/connrotation", - "util/flowcontrol", - "util/homedir", - 
"util/jsonpath", - "util/keyutil", - "util/retry", - "util/workqueue", - ] - pruneopts = "UT" - revision = "1a26190bd76a9017e289958b9fba936430aa3704" - version = "kubernetes-1.14.1" - -[[projects]] - digest = "1:e2999bf1bb6eddc2a6aa03fe5e6629120a53088926520ca3b4765f77d7ff7eab" - name = "k8s.io/klog" - packages = ["."] - pruneopts = "UT" - revision = "a5bc97fbc634d635061f3146511332c7e313a55a" - version = "v0.1.0" - -[[projects]] - branch = "master" - digest = "1:0dac10b488f2671c15738b662478d665b0c0ec7cae3362669dec172147331ce5" - name = "k8s.io/kube-openapi" - packages = [ - "pkg/common", - "pkg/util/proto", - ] - pruneopts = "UT" - revision = "e3762e86a74c878ffed47484592986685639c2cd" - -[[projects]] - branch = "master" - digest = "1:14e8a3b53e6d8cb5f44783056b71bb2ca1ac7e333939cc97f3e50b579c920845" - name = "k8s.io/utils" - packages = [ - "buffer", - "integer", - "trace", - ] - pruneopts = "UT" - revision = "21c4ce38f2a793ec01e925ddc31216500183b773" - -[[projects]] - digest = "1:9070222ca967d09b3966552a161dd4420d62315964bf5e1efd8cc4c7c30ebca8" - name = "sigs.k8s.io/testing_frameworks" - packages = [ - "integration", - "integration/addr", - "integration/internal", - ] - pruneopts = "UT" - revision = "d348cb12705b516376e0c323bacca72b00a78425" - version = "v0.1.1" - -[[projects]] - digest = "1:7719608fe0b52a4ece56c2dde37bedd95b938677d1ab0f84b8a7852e4c59f849" - name = "sigs.k8s.io/yaml" - packages = ["."] - pruneopts = "UT" - revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" - version = "v1.1.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/appscode/jsonpatch", - "github.com/emicklei/go-restful", - "github.com/evanphx/json-patch", - "github.com/go-logr/logr", - "github.com/go-logr/logr/testing", - "github.com/go-logr/zapr", - "github.com/go-openapi/spec", - "github.com/onsi/ginkgo", - 
"github.com/onsi/ginkgo/config", - "github.com/onsi/ginkgo/types", - "github.com/onsi/gomega", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "github.com/prometheus/client_model/go", - "github.com/spf13/pflag", - "go.uber.org/zap", - "go.uber.org/zap/buffer", - "go.uber.org/zap/zapcore", - "gopkg.in/fsnotify.v1", - "k8s.io/api/admission/v1beta1", - "k8s.io/api/apps/v1", - "k8s.io/api/apps/v1beta1", - "k8s.io/api/autoscaling/v1", - "k8s.io/api/core/v1", - "k8s.io/api/extensions/v1beta1", - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", - "k8s.io/apimachinery/pkg/api/errors", - "k8s.io/apimachinery/pkg/api/meta", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "k8s.io/apimachinery/pkg/fields", - "k8s.io/apimachinery/pkg/labels", - "k8s.io/apimachinery/pkg/runtime", - "k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/runtime/serializer", - "k8s.io/apimachinery/pkg/selection", - "k8s.io/apimachinery/pkg/types", - "k8s.io/apimachinery/pkg/util/json", - "k8s.io/apimachinery/pkg/util/runtime", - "k8s.io/apimachinery/pkg/util/sets", - "k8s.io/apimachinery/pkg/util/uuid", - "k8s.io/apimachinery/pkg/util/wait", - "k8s.io/apimachinery/pkg/util/yaml", - "k8s.io/apimachinery/pkg/watch", - "k8s.io/client-go/discovery", - "k8s.io/client-go/dynamic", - "k8s.io/client-go/informers", - "k8s.io/client-go/kubernetes", - "k8s.io/client-go/kubernetes/scheme", - "k8s.io/client-go/kubernetes/typed/core/v1", - "k8s.io/client-go/plugin/pkg/client/auth", - "k8s.io/client-go/plugin/pkg/client/auth/gcp", - "k8s.io/client-go/rest", - "k8s.io/client-go/restmapper", - "k8s.io/client-go/testing", - "k8s.io/client-go/tools/cache", - "k8s.io/client-go/tools/clientcmd", - 
"k8s.io/client-go/tools/leaderelection", - "k8s.io/client-go/tools/leaderelection/resourcelock", - "k8s.io/client-go/tools/metrics", - "k8s.io/client-go/tools/record", - "k8s.io/client-go/tools/reference", - "k8s.io/client-go/util/workqueue", - "k8s.io/kube-openapi/pkg/common", - "sigs.k8s.io/testing_frameworks/integration", - "sigs.k8s.io/testing_frameworks/integration/addr", - "sigs.k8s.io/yaml", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index 81598f106b..0000000000 --- a/Gopkg.toml +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2018 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Packages required by users -# NB(directxman12): be very careful how you word these -- -# dep considers them as a dependency on the package you list -# meaning that if there's a main.go in the root package -# (like in apiextensions-apiserver), you'll pull it's deps in. 
-required = ["sigs.k8s.io/testing_frameworks/integration", - "k8s.io/client-go/plugin/pkg/client/auth", - "github.com/spf13/pflag", - "github.com/emicklei/go-restful", - "github.com/go-openapi/spec", - "k8s.io/kube-openapi/pkg/common", - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", - "github.com/prometheus/client_golang/prometheus", - ] - -[[constraint]] - name = "k8s.io/api" - version = "kubernetes-1.14.1" - -[[constraint]] - name = "k8s.io/apiextensions-apiserver" - version = "kubernetes-1.14.1" - -[[constraint]] - name = "k8s.io/apimachinery" - version = "kubernetes-1.14.1" - -[[constraint]] - name = "k8s.io/client-go" - version = "kubernetes-1.14.1" - -[[constraint]] - name = "sigs.k8s.io/testing_frameworks" - version = "v0.1.1" - -[[constraint]] - name = "github.com/onsi/ginkgo" - version = "v1.5.0" - -[[constraint]] - name = "github.com/onsi/gomega" - version = "v1.4.0" - -[[constraint]] - name = "sigs.k8s.io/yaml" - version = "1.1.0" - -[[constraint]] - name = "go.uber.org/zap" - version = "1.8.0" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = "0.9.0" - -# these are not listed explicitly until we get version tags, -# since dep doesn't like bare revision dependencies - -# [[constraint]] -# name = "github.com/go-logr/logr" -# -# [[constraint]] -# name = "github.com/go-logr/zapr" - -# For dependency below: Refer to issue https://github.com/golang/dep/issues/1799 -[[override]] -name = "gopkg.in/fsnotify.v1" -source = "https://github.com/fsnotify/fsnotify.git" -version="v1.4.7" - -[prune] - go-tests = true - unused-packages = true diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..b8e9cfa877 --- /dev/null +++ b/Makefile @@ -0,0 +1,218 @@ +#!/usr/bin/env bash + +# Copyright 
2020 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# If you update this file, please follow +# https://suva.sh/posts/well-documented-makefiles + +## -------------------------------------- +## General +## -------------------------------------- + +SHELL:=/usr/bin/env bash +.DEFAULT_GOAL:=help + +# +# Go. +# +GO_VERSION ?= 1.24.0 + +# Use GOPROXY environment variable if set +GOPROXY := $(shell go env GOPROXY) +ifeq ($(GOPROXY),) +GOPROXY := https://proxy.golang.org +endif +export GOPROXY + +# Active module mode, as we use go modules to manage dependencies +export GO111MODULE=on + +# Hosts running SELinux need :z added to volume mounts +SELINUX_ENABLED := $(shell cat /sys/fs/selinux/enforce 2> /dev/null || echo 0) + +ifeq ($(SELINUX_ENABLED),1) + DOCKER_VOL_OPTS?=:z +endif + +# Tools. +TOOLS_DIR := hack/tools +TOOLS_BIN_DIR := $(abspath $(TOOLS_DIR)/bin) +GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/golangci-lint) +GO_APIDIFF := $(TOOLS_BIN_DIR)/go-apidiff +CONTROLLER_GEN := $(TOOLS_BIN_DIR)/controller-gen +ENVTEST_DIR := $(abspath tools/setup-envtest) +SCRATCH_ENV_DIR := $(abspath examples/scratch-env) +GO_INSTALL := ./hack/go-install.sh + +# The help will print out all targets with their descriptions organized bellow their categories. The categories are represented by `##@` and the target descriptions by `##`. 
+# The awk commands is responsible to read the entire set of makefiles included in this invocation, looking for lines of the file as xyz: ## something, and then pretty-format the target and help. Then, if there's a line with ##@ something, that gets pretty-printed as a category. +# More info over the usage of ANSI control characters for terminal formatting: https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info over awk command: http://linuxcommand.org/lc3_adv_awk.php +.PHONY: help +help: ## Display this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +## -------------------------------------- +## Testing +## -------------------------------------- + +.PHONY: test +test: test-tools ## Run the script check-everything.sh which will check all. + TRACE=1 ./hack/check-everything.sh + +.PHONY: test-tools +test-tools: ## tests the tools codebase (setup-envtest) + cd tools/setup-envtest && go test ./... + +## -------------------------------------- +## Binaries +## -------------------------------------- + +GO_APIDIFF_VER := v0.8.2 +GO_APIDIFF_BIN := go-apidiff +GO_APIDIFF := $(abspath $(TOOLS_BIN_DIR)/$(GO_APIDIFF_BIN)-$(GO_APIDIFF_VER)) +GO_APIDIFF_PKG := github.com/joelanford/go-apidiff + +$(GO_APIDIFF): # Build go-apidiff from tools folder. + GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(GO_APIDIFF_PKG) $(GO_APIDIFF_BIN) $(GO_APIDIFF_VER) + +CONTROLLER_GEN_VER := v0.17.1 +CONTROLLER_GEN_BIN := controller-gen +CONTROLLER_GEN := $(abspath $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER)) +CONTROLLER_GEN_PKG := sigs.k8s.io/controller-tools/cmd/controller-gen + +$(CONTROLLER_GEN): # Build controller-gen from tools folder. 
+ GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(CONTROLLER_GEN_PKG) $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER) + +GOLANGCI_LINT_BIN := golangci-lint +GOLANGCI_LINT_VER := $(shell cat .github/workflows/golangci-lint.yml | grep [[:space:]]version: | sed 's/.*version: //') +GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN)-$(GOLANGCI_LINT_VER)) +GOLANGCI_LINT_PKG := github.com/golangci/golangci-lint/v2/cmd/golangci-lint + +$(GOLANGCI_LINT): # Build golangci-lint from tools folder. + GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) $(GOLANGCI_LINT_PKG) $(GOLANGCI_LINT_BIN) $(GOLANGCI_LINT_VER) + +GO_MOD_CHECK_DIR := $(abspath ./hack/tools/cmd/gomodcheck) +GO_MOD_CHECK := $(abspath $(TOOLS_BIN_DIR)/gomodcheck) +GO_MOD_CHECK_IGNORE := $(abspath .gomodcheck.yaml) +.PHONY: $(GO_MOD_CHECK) +$(GO_MOD_CHECK): # Build gomodcheck + go build -C $(GO_MOD_CHECK_DIR) -o $(GO_MOD_CHECK) + +## -------------------------------------- +## Linting +## -------------------------------------- + +.PHONY: lint +lint: $(GOLANGCI_LINT) ## Lint codebase + $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) + cd tools/setup-envtest; $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) + +.PHONY: lint-fix +lint-fix: $(GOLANGCI_LINT) ## Lint the codebase and run auto-fixers if supported by the linter. + GOLANGCI_LINT_EXTRA_ARGS=--fix $(MAKE) lint + +## -------------------------------------- +## Generate +## -------------------------------------- + +.PHONY: modules +modules: ## Runs go mod to ensure modules are up to date. + go mod tidy + cd $(TOOLS_DIR); go mod tidy + cd $(ENVTEST_DIR); go mod tidy + cd $(SCRATCH_ENV_DIR); go mod tidy + +## -------------------------------------- +## Release +## -------------------------------------- + +RELEASE_DIR := tools/setup-envtest/out + +.PHONY: $(RELEASE_DIR) +$(RELEASE_DIR): + mkdir -p $(RELEASE_DIR)/ + +.PHONY: release +release: clean-release $(RELEASE_DIR) ## Build release. + @if ! 
[ -z "$$(git status --porcelain)" ]; then echo "Your local git repository contains uncommitted changes, use git clean before proceeding."; exit 1; fi + + # Build binaries first. + $(MAKE) release-binaries + +.PHONY: release-binaries +release-binaries: ## Build release binaries. + RELEASE_BINARY=setup-envtest-linux-amd64 GOOS=linux GOARCH=amd64 $(MAKE) release-binary + RELEASE_BINARY=setup-envtest-linux-arm64 GOOS=linux GOARCH=arm64 $(MAKE) release-binary + RELEASE_BINARY=setup-envtest-linux-ppc64le GOOS=linux GOARCH=ppc64le $(MAKE) release-binary + RELEASE_BINARY=setup-envtest-linux-s390x GOOS=linux GOARCH=s390x $(MAKE) release-binary + RELEASE_BINARY=setup-envtest-darwin-amd64 GOOS=darwin GOARCH=amd64 $(MAKE) release-binary + RELEASE_BINARY=setup-envtest-darwin-arm64 GOOS=darwin GOARCH=arm64 $(MAKE) release-binary + RELEASE_BINARY=setup-envtest-windows-amd64.exe GOOS=windows GOARCH=amd64 $(MAKE) release-binary + +.PHONY: release-binary +release-binary: $(RELEASE_DIR) + docker run \ + --rm \ + -e CGO_ENABLED=0 \ + -e GOOS=$(GOOS) \ + -e GOARCH=$(GOARCH) \ + -e GOCACHE=/tmp/ \ + --user $$(id -u):$$(id -g) \ + -v "$$(pwd):/workspace$(DOCKER_VOL_OPTS)" \ + -w /workspace/tools/setup-envtest \ + golang:$(GO_VERSION) \ + go build -a -trimpath -ldflags "-X 'sigs.k8s.io/controller-runtime/tools/setup-envtest/version.version=$(RELEASE_TAG)' -extldflags '-static'" \ + -o ./out/$(RELEASE_BINARY) ./ + +## -------------------------------------- +## Cleanup / Verification +## -------------------------------------- + +.PHONY: clean +clean: ## Cleanup. + $(GOLANGCI_LINT) cache clean + $(MAKE) clean-bin + +.PHONY: clean-bin +clean-bin: ## Remove all generated binaries. 
+ rm -rf hack/tools/bin + +.PHONY: clean-release +clean-release: ## Remove the release folder + rm -rf $(RELEASE_DIR) + +.PHONY: verify-modules +verify-modules: modules $(GO_MOD_CHECK) ## Verify go modules are up to date + @if !(git diff --quiet HEAD -- go.sum go.mod $(TOOLS_DIR)/go.mod $(TOOLS_DIR)/go.sum $(ENVTEST_DIR)/go.mod $(ENVTEST_DIR)/go.sum $(SCRATCH_ENV_DIR)/go.sum); then \ + git diff; \ + echo "go module files are out of date, please run 'make modules'"; exit 1; \ + fi + $(GO_MOD_CHECK) $(GO_MOD_CHECK_IGNORE) + +APIDIFF_OLD_COMMIT ?= $(shell git rev-parse origin/main) + +.PHONY: apidiff +verify-apidiff: $(GO_APIDIFF) ## Check for API differences + $(GO_APIDIFF) $(APIDIFF_OLD_COMMIT) --print-compatible + +## -------------------------------------- +## Helpers +## -------------------------------------- + +##@ helpers: + +go-version: ## Print the go version we use to compile our binaries and images + @echo $(GO_VERSION) diff --git a/OWNERS b/OWNERS index 54ecc0f18d..9f2d296e4c 100644 --- a/OWNERS +++ b/OWNERS @@ -1,11 +1,11 @@ -# See the OWNERS docs: https://git.k8s.io/community/contributors/devel/owners.md -owners: - - directxman12 - - droot - - pwittrock +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md + approvers: - controller-runtime-admins - controller-runtime-maintainers + - controller-runtime-approvers reviewers: - controller-runtime-admins - controller-runtime-maintainers + - controller-runtime-approvers + - controller-runtime-reviewers diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 40e94ca6e3..47bf6eedf3 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -1,8 +1,39 @@ -# See the OWNERS docs: https://git.k8s.io/community/contributors/devel/owners.md +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md aliases: + # active folks who can be contacted to perform admin-related + # tasks on the repo, or otherwise approve any PRS. 
controller-runtime-admins: + - alvaroaleman + - joelanford + - sbueringer + - vincepri + + # non-admin folks who have write-access and can approve any PRs in the repo + controller-runtime-maintainers: + - alvaroaleman + - joelanford + - sbueringer + - vincepri + + # non-admin folks who can approve any PRs in the repo + controller-runtime-approvers: + - fillzpp + + # folks who can review and LGTM any PRs in the repo (doesn't + # include approvers & admins -- those count too via the OWNERS + # file) + controller-runtime-reviewers: + - varshaprasad96 + - inteon + - JoelSpeed + - troy0820 + + # folks who may have context on ancient history, + # but are no longer directly involved + controller-runtime-emeritus-maintainers: - directxman12 + controller-runtime-emeritus-admins: - droot + - mengqiy - pwittrock - controller-runtime-maintainers: [] diff --git a/README.md b/README.md index 0e02f8a9c7..54bacad42e 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,23 @@ -[![Build Status](https://travis-ci.org/kubernetes-sigs/controller-runtime.svg?branch=master)](https://travis-ci.org/kubernetes-sigs/controller-runtime "Travis") [![Go Report Card](https://goreportcard.com/badge/sigs.k8s.io/controller-runtime)](https://goreportcard.com/report/sigs.k8s.io/controller-runtime) +[![godoc](https://pkg.go.dev/badge/sigs.k8s.io/controller-runtime)](https://pkg.go.dev/sigs.k8s.io/controller-runtime) # Kubernetes controller-runtime Project -The Kubernetes controller-runtime Project is a set of go libraries for building Controllers. +The Kubernetes controller-runtime Project is a set of go libraries for building +Controllers. It is leveraged by [Kubebuilder](https://book.kubebuilder.io/) and +[Operator SDK](https://github.com/operator-framework/operator-sdk). Both are +a great place to start for new projects. See +[Kubebuilder's Quick Start](https://book.kubebuilder.io/quick-start.html) to +see how it can be used. 
Documentation: -- [Package overview](https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg) -- [Basic controller using builder](https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/builder#example-Builder) -- [Creating a manager](https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/manager#example-New) -- [Creating a controller](https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/controller#example-New) -- [Examples](https://github.com/kubernetes-sigs/controller-runtime/blob/master/examples) +- [Package overview](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg) +- [Basic controller using builder](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/builder#example-Builder) +- [Creating a manager](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/manager#example-New) +- [Creating a controller](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/controller#example-New) +- [Examples](https://github.com/kubernetes-sigs/controller-runtime/blob/main/examples) +- [Designs](https://github.com/kubernetes-sigs/controller-runtime/blob/main/designs) # Versioning, Maintenance, and Compatibility @@ -19,9 +25,9 @@ The full documentation can be found at [VERSIONING.md](VERSIONING.md), but TL;DR Users: -- We follow [Semantic Versioning (semver)](https://semver.org) -- Use releases with your dependency management to ensure that you get compatible code -- The master branch contains all the latest code, some of which may break compatibility (so "normal" `go get` is not recommended) +- We stick to a zero major version +- We publish a minor version for each Kubernetes minor release and allow breaking changes between minor versions +- We publish patch versions as needed and we don't allow breaking changes in them Contributors: @@ -34,6 +40,27 @@ Contributors: * [Documentation 
Changes](/.github/PULL_REQUEST_TEMPLATE/docs.md) * [Test/Build/Other Changes](/.github/PULL_REQUEST_TEMPLATE/other.md) +## Compatibility + +Every minor version of controller-runtime has been tested with a specific minor version of client-go. A controller-runtime minor version *may* be compatible with +other client-go minor versions, but this is by chance and neither supported nor tested. In general, we create one minor version of controller-runtime +for each minor version of client-go and other k8s.io/* dependencies. + +The minimum Go version of controller-runtime is the highest minimum Go version of our Go dependencies. Usually, this will +be identical to the minimum Go version of the corresponding k8s.io/* dependencies. + +Compatible k8s.io/*, client-go and minimum Go versions can be looked up in our [go.mod](go.mod) file. + +| | k8s.io/*, client-go | minimum Go version | +|----------|:-------------------:|:------------------:| +| CR v0.21 | v0.33 | 1.24 | +| CR v0.20 | v0.32 | 1.23 | +| CR v0.19 | v0.31 | 1.22 | +| CR v0.18 | v0.30 | 1.22 | +| CR v0.17 | v0.29 | 1.21 | +| CR v0.16 | v0.28 | 1.20 | +| CR v0.15 | v0.27 | 1.20 | + ## FAQ See [FAQ.md](FAQ.md) @@ -42,15 +69,13 @@ See [FAQ.md](FAQ.md) Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). -controller-runtime is a subproject of the [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder) project -in sig apimachinery. - You can reach the maintainers of this project at: -- Slack channel: [#kubebuilder](http://slack.k8s.io/#kubebuilder) +- Slack channel: [#controller-runtime](https://kubernetes.slack.com/archives/C02MRBMN00Z) - Google Group: [kubebuilder@googlegroups.com](https://groups.google.com/forum/#!forum/kubebuilder) ## Contributing + Contributions are greatly appreciated. The maintainers actively manage the issues list, and try to highlight issues suitable for newcomers. 
The project follows the typical GitHub pull request model. See [CONTRIBUTING.md](CONTRIBUTING.md) for more details. Before starting any work, please either comment on an existing issue, or file a new one. @@ -58,4 +83,3 @@ Before starting any work, please either comment on an existing issue, or file a ## Code of conduct Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). - diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 0000000000..2a857b976e --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,51 @@ +# Release Process + +The Kubernetes controller-runtime Project is released on an as-needed basis. The process is as follows: + +**Note:** Releases are done from the `release-MAJOR.MINOR` branches. For PATCH releases it is not required +to create a new branch; you will just need to ensure that all bug fixes are cherry-picked into the respective +`release-MAJOR.MINOR` branch. To know more about versioning check https://semver.org/. + +## How to do a release + +### Create the new branch and the release tag + +1. Create a new branch `git checkout -b release-` from main +2. Push the new branch to the remote repository + +### Now, let's generate the changelog + +1. Create the changelog from the new branch `release-` (`git checkout release-`). +You will need to use the [kubebuilder-release-tools][kubebuilder-release-tools] to generate the notes. See [here][release-notes-generation] + +> **Note** +> - You will need to have checked out the previous branch locally from the remote repository +> - Also, ensure that you fetch all tags from the remote `git fetch --all --tags` + +### Draft a new release from GitHub + +1. Create a new tag with the correct version from the new `release-` branch +2. Add the changelog on it and publish. Now, the source code is released! + +### Add a new Prow test for the new release branch + +1. 
Create a new prow test under [github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-sigs/controller-runtime](https://github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes-sigs/controller-runtime) +for the new `release-` branch. (i.e. for the `0.11.0` release see the PR: https://github.com/kubernetes/test-infra/pull/25205) +2. Ping the infra PR in the controller-runtime slack channel for reviews. + +### Announce the new release: + +1. Publish on the Slack channel the new release, i.e: + +```` +:announce: Controller-Runtime v0.12.0 has been released! +This release includes a Kubernetes dependency bump to v1.24. +For more info, see the release page: https://github.com/kubernetes-sigs/controller-runtime/releases. + :tada: Thanks to all our contributors! +```` + +2. An announcement email is sent to `kubebuilder@googlegroups.com` with the subject `[ANNOUNCE] Controller-Runtime $VERSION is released` + +[kubebuilder-release-tools]: https://github.com/kubernetes-sigs/kubebuilder-release-tools +[release-notes-generation]: https://github.com/kubernetes-sigs/kubebuilder-release-tools/blob/master/README.md#release-notes-generation +[release-process]: https://github.com/kubernetes-sigs/kubebuilder/blob/master/VERSIONING.md#releasing diff --git a/SECURITY_CONTACTS b/SECURITY_CONTACTS index 6f826fe021..9c5241c6b4 100644 --- a/SECURITY_CONTACTS +++ b/SECURITY_CONTACTS @@ -10,6 +10,6 @@ # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE # INSTRUCTIONS AT https://kubernetes.io/security/ -directxman12 -pwittrock -droot +alvaroaleman +sbueringer +vincepri diff --git a/TMP-LOGGING.md b/TMP-LOGGING.md index 78da1951de..97e091fd48 100644 --- a/TMP-LOGGING.md +++ b/TMP-LOGGING.md @@ -11,7 +11,7 @@ need to adjust how you think about logging a bit. 
With structured logging, we associate a *constant* log message with some variable key-value pairs. For instance, suppose we wanted to log that we -were starting reconciliation on a pod. In the Go standard libary logger, +were starting reconciliation on a pod. In the Go standard library logger, we might write: ```go @@ -21,7 +21,7 @@ log.Printf("starting reconciliation for pod %s/%s", podNamespace, podName) In controller-runtime, we'd instead write: ```go -logger.Info("starting reconciliation", "pod", req.NamespacedNamed) +logger.Info("starting reconciliation", "pod", req.NamespacedName) ``` or even write @@ -30,7 +30,7 @@ or even write func (r *Reconciler) Reconcile(req reconcile.Request) (reconcile.Response, error) { logger := logger.WithValues("pod", req.NamespacedName) // do some stuff - logger.Info("starting reconcilation") + logger.Info("starting reconciliation") } ``` @@ -49,16 +49,16 @@ provides some helpers to make it easy to use You can configure the logging implementation using `"sigs.k8s.io/controller-runtime/pkg/log".SetLogger`. That -package also contains the convinience functions for setting up Zap. +package also contains the convenience functions for setting up Zap. -You can get a handle to the the "root" logger using +You can get a handle to the "root" logger using `"sigs.k8s.io/controller-runtime/pkg/log".Log`, and can then call `WithName` to create individual named loggers. You can call `WithName` repeatedly to chain names together: ```go logger := log.Log.WithName("controller").WithName("replicaset") -// in reconile... +// in reconcile... logger = logger.WithValues("replicaset", req.NamespacedName) // later on in reconcile... 
logger.Info("doing things with pods", "pod", newPod) @@ -75,7 +75,7 @@ allKubernetesObjectsEverywhere) ``` While it's possible to use higher log levels, it's recommended that you -stick with `V(1)` or V(0)` (which is equivalent to not specifying `V`), +stick with `V(1)` or `V(0)` (which is equivalent to not specifying `V`), and then filter later based on key-value pairs or messages; different numbers tend to lose meaning easily over time, and you'll be left wondering why particular logs lines are at `V(5)` instead of `V(7)`. @@ -86,10 +86,13 @@ Errors should *always* be logged with `log.Error`, which allows logr implementations to provide special handling of errors (for instance, providing stack traces in debug mode). -It's acceptible to log call `log.Error` with a nil error object. This +It's acceptable to log call `log.Error` with a nil error object. This conveys that an error occurred in some capacity, but that no actual `error` object was involved. +Errors returned by the `Reconcile` implementation of the `Reconciler` interface are commonly logged as a `Reconciler error`. +It's a developer choice to create an additional error log in the `Reconcile` implementation so a more specific file name and line for the error are returned. + ## Logging messages - Don't put variable content in your messages -- use key-value pairs for @@ -125,12 +128,12 @@ logic. ### Groups, Versions, and Kinds -- Kinds should not be logged alone (they're meanless alone). Use +- Kinds should not be logged alone (they're meaningless alone). Use a `GroupKind` object to log them instead, or a `GroupVersionKind` when version is relevant. - If you need to log an API version string, use `api version` as the key - (formatted as with a `GroupVersion`, or as recieved directly from API + (formatted as with a `GroupVersion`, or as received directly from API discovery). 
### Objects and Types diff --git a/VERSIONING.md b/VERSIONING.md index 3b7831532b..7ad6b142cc 100644 --- a/VERSIONING.md +++ b/VERSIONING.md @@ -1,267 +1,40 @@ # Versioning and Branching in controller-runtime -*NB*: this also applies to controller-tools. +We follow the [common KubeBuilder versioning guidelines][guidelines], and +use the corresponding tooling. -## TL;DR: +For the purposes of the aforementioned guidelines, controller-runtime +counts as a "library project", but otherwise follows the guidelines +exactly. -### Users +We stick to a major version of zero and create a minor version for +each Kubernetes minor version and we allow breaking changes in our +minor versions. We create patch releases as needed and don't allow +breaking changes in them. -- We follow [Semantic Versioning (semver)](https://semver.org) -- Use releases with your dependency management to ensure that you get - compatible code -- The master branch contains all the latest code, some of which may break - compatibility (so "normal" `go get` is not recommended) +Publishing a non-zero major version is pointless for us, as the k8s.io/* +libraries we heavily depend on do breaking changes but use the same +versioning scheme as described above. Consequently, a project can only +ever depend on one controller-runtime version. -### Contributors +[guidelines]: https://sigs.k8s.io/kubebuilder-release-tools/VERSIONING.md -- All code PR must be labeled with :bug: (patch fixes), :sparkles: - (backwards-compatible features), or :warning: (breaking changes) +## Compatibility and Release Support -- Breaking changes will find their way into the next major release, other - changes will go into an semi-immediate patch or minor release +For release branches, we generally tend to support backporting one (1) +major release (`release-{X-1}` or `release-0.{Y-1}`), but may go back +further if the need arises and is very pressing (e.g. security updates). -- Please *try* to avoid breaking changes when you can. 
They make users - face difficult decisions ("when do I go through the pain of - upgrading?"), and make life hard for maintainers and contributors - (dealing with differences on stable branches). +### Dependency Support -### Mantainers +Note the [guidelines on dependency versions][dep-versions]. Particularly: -Don't be lazy, read the rest of this doc :-) +- We **DO** guarantee Kubernetes REST API compatibility -- if a given + version of controller-runtime stops working with what should be + a supported version of Kubernetes, this is almost certainly a bug. -## Overview +- We **DO NOT** guarantee any particular compatibility matrix between + kubernetes library dependencies (client-go, apimachinery, etc); Such + compatibility is infeasible due to the way those libraries are versioned. -controller-runtime (and friends) follow [Semantic -Versioning](https://semver.org). I'd recommend reading the aforementioned -link if you're not familiar, but essentially, for any given release X.Y.Z: - -- an X (*major*) release indicates a set of backwards-compatible code. - Changing X means there's a breaking change. - -- a Y (*minor*) release indicates a minimum feature set. Changing Y means - the addition of a backwards-compatible feature. - -- a Z (*patch*) release indicates minimum set of bugfixes. Changing - Z means a backwards-compatible change that doesn't add functionality. - -*NB*: If the major release is `0`, any minor release may contain breaking -changes. - -These guarantees extend to all code exposed in public APIs of -controller-runtime. This includes code both in controller-runtime itself, -*plus types from dependencies in public APIs*. Types and functions not in -public APIs are not considered part of the guarantee. - -In order to easily maintain the guarantees, we have a couple of processes -that we follow. - -## Branches - -controller-runtime contains two types of branches: the *master* branch and -*release-X* branches. 
- -The *master* branch is where development happens. All the latest and -greatest code, including breaking changes, happens on master. - -The *release-X* branches contain stable, backwards compatible code. Every -major (X) release, a new such branch is created. It is from these -branches that minor and patch releases are tagged. If some cases, it may -be neccessary open PRs for bugfixes directly against stable branches, but -this should generally not be the case. - -The maintainers are responsible for updating the contents of this branch; -generally, this is done just before a release using release tooling that -filters and checks for changes tagged as breaking (see below). - -### Tooling - -* [release-notes.sh](hack/release-notes.sh): generate release notes - for a range of commits, and check for next version type (***TODO***) - -* [verify-commit-messages.sh](hack/verify-commit-messages.sh): check that - your PR and/or commit messages have the right versioning icon - (***TODO***). - -## PR Process - -Every PR should be annotated with an icon indicating whether it's -a: - -- Breaking change: :warning: (`:warning:`) -- Non-breaking feature: :sparkles: (`:sparkles:`) -- Patch fix: :bug: (`:bug:`) -- Docs: :book: (`:book:`) -- Infra/Tests/Other: :running: (`:running:`) - -You can also use the equivalent emoji directly, since GitHub doesn't -render the `:xyz:` aliases in PR titles. - -Individual commits should not be tagged separately, but will generally be -assumed to match the PR. For instance, if you have a bugfix in with -a breaking change, it's generally encouraged to submit the bugfix -separately, but if you must put them in one PR, mark the commit -separately. - -### Commands and Workflow - -controller-runtime follows the standard Kubernetes workflow: any PR needs -`lgtm` and `approved` labels, PRs authors must have signed the CNCF CLA, -and PRs must pass the tests before being merged. 
See [the contributor -docs](https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md#the-testing-and-merge-workflow) -for more info. - -We use the same priority and kind labels as Kubernetes. See the labels -tab in GitHub for the full list. - -The standard Kubernetes comment commands should work in -controller-runtime. See [Prow](https://prow.k8s.io/command-help) for -a command reference. - -## Release Process - -Minor and patch releases are generally done immediately after a feature or -bugfix is landed, or sometimes a series of features tied together. - -Minor releases will only be tagged on the *most recent* major release -branch, except in exceptional circumstances. Patches will be backported -to maintained stable versions, as needed. - -Major releases are done shortly after a breaking change is merged -- once -a breaking change is merged, the next release *must* be a major revison. -We don't intend to have a lot of these, so we may put off merging breaking -PRs until a later date. - -### Exact Steps - -Follow the release-specific steps below, then follow the general steps -after that. - -#### Minor and patch releases - -1. Update the release-X branch with the latest set of changes by calling - `git rebase master` from the release branch. - -#### Major releases - -1. Create a new release branch named `release-X` (where `X` is the new - version) off of master. - -#### General - -2. Generate release notes using the release note tooling. - -3. Add a release for controller-runtime on GitHub, using those release - notes, with a title of `vX.Y.Z`. - -4. Do a similar process for - [controller-tools](https://github.com/kubernetes-sigs/controller-tools) - -5. Announce the release in `#kubebuilder` on Slack with a pinned message. - -6. Potentially update - [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder) as well. - -### Breaking Changes - -Try to avoid breaking changes. 
They make life difficult for users, who -have to rewrite their code when they eventually upgrade, and for -maintainers/contributors, who have to deal with differences between master -and stable branches. - -That being said, we'll occaisonally want to make breaking changes. They'll -be merged onto master, and will then trigger a major release (see [Release -Proccess](#release-process)). Because breaking changes induce a major -revision, the maintainers may delay a particular breaking change until -a later date when they are ready to make a major revision with a few -breaking changes. - -If you're going to make a breaking change, please make sure to explain in -detail why it's helpful. Is it necessary to cleanly resolve an issue? -Does it improve API ergonomics? - -Maintainers should treat breaking changes with caution, and evaluate -potential non-breaking solutions (see below). - -Note that API breakage in public APIs due to dependencies will trigger -a major revision, so you may occaisonally need to have a major release -anyway, due to changes in libraries like `k8s.io/client-go` or -`k8s.io/apimachinery`. - -*NB*: Pre-1.0 releases treat breaking changes a bit more lightly. We'll -still consider carefully, but the pre-1.0 timeframe is useful for -converging on a ergonomic API. - -#### Avoiding breaking changes - -##### Solutions to avoid - -- **Confusingly duplicate methods, functions, or variables.** - - For instance, suppose we have an interface method `List(ctx - context.Context, options *ListOptions, obj runtime.Object) error`, and - we decide to switch it so that options come at the end, parametrically. - Adding a new interface method `ListParametric(ctx context.Context, obj - runtime.Object, options... ListOption)` is probably not the right - solution: - - - Users will intuitively see `List`, and use that in new projects, even - if it's marked as deprecated. 
- - - Users who don't notice the deprecation may be confused as to the - difference between `List` and `ListParametric`. - - - It's not immediately obvious in isolation (e.g. in surrounding code) - why the method is called `ListParametric`, and may cause confusion - when reading code that makes use of that method. - - In this case, it may be better to make the breaking change, and then - eventually do a major release. - -## Why don't we... - -### Use "next"-style branches - -Development branches: - -- don't win us much in terms of maintenance in the case of breaking - changes (we still have to merge/manage multiple branches for development - and stable) - -- can be confusing to contributors, who often expect master to have the - latest changes. - -### Never break compatibility - -Never doing a new major release could be an admirable goal, but gradually -leads to API cruft. - -Since one of the goals of controller-runtime is to be a friendly and -intuitive API, we want to avoid too much API cruft over time, and -occaisonal breaking changes in major releases help accomplish that goal. - -Furthermore, our dependency on Kubernetes libraries makes this difficult -(see below) - -### Always assume we've broken compatibility - -*a.k.a. k8s.io/client-go style* - -While this makes life easier (a bit) for maintainers, it's problematic for -users. While breaking changes arrive sooner, upgrading becomes very -painful. - -Furthermore, we still have to maintain stable branches for bugfixes, so -the maintenance burden isn't lessened by a ton. - -### Extend compatibility guarantees to all dependencies - -This is very difficult with the number of Kubernetes dependencies we have. -Kubernetes dependencies tend to either break compatibility every major -release (e.g. k8s.io/client-go, which loosely follows semver), or at -a whim (many other Kubernetes libraries). 
- -If we limit to the few objects we expose, we can better inform users about -how *controller-runtime itself* has changed in a given release. Then, -users can make informed decisions about how to proceed with any direct -uses of Kubernetes dependencies their controller-runtime-based application -may have. +[dep-versions]: https://sigs.k8s.io/kubebuilder-release-tools/VERSIONING.md#kubernetes-version-compatibility diff --git a/alias.go b/alias.go index b41f51435b..01ba012dcc 100644 --- a/alias.go +++ b/alias.go @@ -44,7 +44,7 @@ type Result = reconcile.Result // A Manager is required to create Controllers. type Manager = manager.Manager -// Options are the arguments for creating a new Manager +// Options are the arguments for creating a new Manager. type Options = manager.Options // SchemeBuilder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds. @@ -54,7 +54,7 @@ type SchemeBuilder = scheme.Builder type GroupVersion = schema.GroupVersion // GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types +// concepts during lookup stages without having partially valid types. type GroupResource = schema.GroupResource // TypeMeta describes an individual object in an API response or request @@ -69,10 +69,17 @@ type TypeMeta = metav1.TypeMeta type ObjectMeta = metav1.ObjectMeta var ( + // RegisterFlags registers flag variables to the given FlagSet if not already registered. + // It uses the default command line FlagSet, if none is provided. Currently, it only registers the kubeconfig flag. + RegisterFlags = config.RegisterFlags + // GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver. // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running // in cluster and use the cluster provided kubeconfig. 
// + // The returned `*rest.Config` has client-side ratelimiting disabled as we can rely on API priority and + // fairness. Set its QPS to a value equal or bigger than 0 to re-enable it. + // // Will log an error and exit if there is an error creating the rest.Config. GetConfigOrDie = config.GetConfigOrDie @@ -80,6 +87,9 @@ var ( // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running // in cluster and use the cluster provided kubeconfig. // + // The returned `*rest.Config` has client-side ratelimiting disabled as we can rely on API priority and + // fairness. Set its QPS to a value equal or bigger than 0 to re-enable it. + // // Config precedence // // * --kubeconfig flag pointing at a file @@ -88,13 +98,19 @@ var ( // // * In-cluster config if running in cluster // - // * $HOME/.kube/config if exists + // * $HOME/.kube/config if exists. GetConfig = config.GetConfig - // NewControllerManagedBy returns a new controller builder that will be started by the provided Manager + // NewControllerManagedBy returns a new controller builder that will be started by the provided Manager. NewControllerManagedBy = builder.ControllerManagedBy + // NewWebhookManagedBy returns a new webhook builder that will be started by the provided Manager. + NewWebhookManagedBy = builder.WebhookManagedBy + // NewManager returns a new Manager for creating Controllers. + // Note that if ContentType in the given config is not set, "application/vnd.kubernetes.protobuf" + // will be used for all built-in resources of Kubernetes, and "application/json" is for other types + // including all CRD resources. NewManager = manager.New + // CreateOrUpdate creates or updates the given object obj in the Kubernetes @@ -112,8 +128,8 @@ var ( // there is another OwnerReference with Controller flag set. SetControllerReference = controllerutil.SetControllerReference - // SetupSignalHandler registered for SIGTERM and SIGINT. 
A stop channel is returned - // which is closed on one of these signals. If a second signal is caught, the program + // SetupSignalHandler registers for SIGTERM and SIGINT. A context is returned + // which is canceled on one of these signals. If a second signal is caught, the program // is terminated with exit code 1. SetupSignalHandler = signals.SetupSignalHandler @@ -122,6 +138,20 @@ var ( // get any actual logging. Log = log.Log + // LoggerFrom returns a logger with predefined values from a context.Context. + // The logger, when used with controllers, can be expected to contain basic information about the object + // that's being reconciled like: + // - `reconciler group` and `reconciler kind` coming from the For(...) object passed in when building a controller. + // - `name` and `namespace` from the reconciliation request. + // + // This is meant to be used with the context supplied in a struct that satisfies the Reconciler interface. + LoggerFrom = log.FromContext + + // LoggerInto takes a context and sets the logger as one of its keys. + // + // This is meant to be used in reconcilers to enrich the logger within a context with additional values. + LoggerInto = log.IntoContext + // SetLogger sets a concrete logging implementation for all deferred Loggers. SetLogger = log.SetLogger ) diff --git a/designs/README.md b/designs/README.md new file mode 100644 index 0000000000..bf8b5000a9 --- /dev/null +++ b/designs/README.md @@ -0,0 +1,36 @@ +Designs +======= + +These are the design documents for changes to Controller Runtime. They +exist to help document the design processes that go into writing +Controller Runtime, but may not be up-to-date (more below). + +Not all changes to Controller Runtime need a design document -- only major +ones. Use your best judgement. 
+ +When submitting a design document, we encourage having written +a proof-of-concept, and it's perfectly acceptable to submit the +proof-of-concept PR simultaneously with the design document, as the +proof-of-concept process can help iron out wrinkles and can help with the +`Example` section of the template. + +## Out-of-Date Designs + +**Controller Runtime documentation +[GoDoc](https://pkg.go.dev/sigs.k8s.io/controller-runtime) should be +considered the canonical, up-to-date reference and architectural +documentation** for Controller Runtime. + +However, if you see an out-of-date design document, feel free to submit +a PR marking it as such, and add an addendum linking to issues documenting +why things changed. For example: + +```markdown + +# Out of Date + +This change is out of date. It turns out curly braces are frustrating to +type, so we had to abandon functions entirely, and have users specify +custom functionality using strings of Common LISP instead. See #000 for +more information. +``` diff --git a/designs/cache_options.md b/designs/cache_options.md new file mode 100644 index 0000000000..bdd29c0481 --- /dev/null +++ b/designs/cache_options.md @@ -0,0 +1,226 @@ +Cache Options +=================== + +This document describes how we imagine the cache options to look in +the future. + +## Goals + +* Align everyone on what settings on the cache we want to support and + their configuration surface +* Ensure that we support both complicated cache setups and provide an + intuitive configuration UX + +## Non-Goals + +* Describe the design and implementation of the cache itself. + The assumption is that the most granular level we will end up with is + "per-object multiple namespaces with distinct selectors" and that this + can be implemented using a "meta cache" that delegates per object and by + extending the current multi-namespace cache +* Outline any kind of timeline for when these settings will be implemented. 
+ Implementation will happen gradually over time whenever someone steps up + to do the actual work + +## Proposal + + +``` +const ( + AllNamespaces = corev1.NamespaceAll +) + +type Config struct { + // LabelSelector specifies a label selector. A nil value allows to + // default this. + LabelSelector labels.Selector + + // FieldSelector specifies a field selector. A nil value allows to + // default this. + FieldSelector fields.Selector + + // Transform specifies a transform func. A nil value allows to default + // this. + Transform toolscache.TransformFunc + + // UnsafeDisableDeepCopy specifies if List and Get requests against the + // cache should not DeepCopy. A nil value allows to default this. + UnsafeDisableDeepCopy *bool +} + + +type ByObject struct { + // Namespaces maps a namespace name to cache setting. If set, only the + // namespaces in this map will be cached. + // + // Settings in the map value that are unset because either the value as a + // whole is nil or because the specific setting is nil will be defaulted. + // Use an empty value for the specific setting to prevent that. + // + // It is possible to have specific Config for just some namespaces + // but cache all namespaces by using the AllNamespaces const as the map key. + // This will then include all namespaces that do not have a more specific + // setting. + // + // A nil map allows to default this to the cache's DefaultNamespaces setting. + // An empty map prevents this. + // + // This must be unset for cluster-scoped objects. + Namespaces map[string]Config + + // Config will be used for cluster-scoped objects and to default + // Config in the Namespaces field. + // + // It gets defaulted from the cache's DefaultLabelSelector, DefaultFieldSelector, + // DefaultUnsafeDisableDeepCopy and DefaultTransform. + Config *Config +} + +type Options struct { + // ByObject specifies per-object cache settings. If unset for a given + // object, this will fall through to Default* settings. 
+ ByObject map[client.Object]ByObject + + // DefaultNamespaces maps namespace names to cache settings. If set, it + // will be used for all objects that have a nil Namespaces setting. + // + // It is possible to have a specific Config for just some namespaces + // but cache all namespaces by using the `AllNamespaces` const as the map + // key. This will then include all namespaces that do not have a more + // specific setting. + // + // The options in the Config that are nil will be defaulted from + // the respective Default* settings. + DefaultNamespaces map[string]Config + + // DefaultLabelSelector is the label selector that will be used as + // the default label selector for everything that doesn't + // have one configured. + DefaultLabelSelector labels.Selector + + // DefaultFieldSelector is the field selector that will be used as + // the default field selector for everything that doesn't have + // one configured. + DefaultFieldSelector fields.Selector + + // DefaultUnsafeDisableDeepCopy is the default for UnsafeDisableDeepCopy + // for everything that doesn't specify this. + DefaultUnsafeDisableDeepCopy *bool + + // DefaultTransform will be used as transform for all object types + // unless they have a more specific transform set in ByObject. + DefaultTransform toolscache.TransformFunc + + // HTTPClient is the http client to use for the REST client + HTTPClient *http.Client + + // Scheme is the scheme to use for mapping objects to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper is the RESTMapper to use for mapping GroupVersionKinds to Resources + Mapper meta.RESTMapper + + // SyncPeriod determines the minimum frequency at which watched resources are + // reconciled. A lower period will correct entropy more quickly, but reduce + // responsiveness to change if there are many watched resources. Change this + // value only if you know what you are doing. Defaults to 10 hours if unset. 
+ // there will be a 10 percent jitter between the SyncPeriod of all controllers + // so that all controllers will not send list requests simultaneously. + // + // This applies to all controllers. + // + // A periodic sync happens for two reasons: + // 1. To insure against a bug in the controller that causes an object to not + // be requeued, when it otherwise should be requeued. + // 2. To insure against an unknown bug in controller-runtime, or its dependencies, + // that causes an object to not be requeued, when it otherwise should be + // requeued, or to be removed from the queue, when it otherwise should not + // be removed. + // + // If you want + // 1. to insure against missed watch events, or + // 2. to poll services that cannot be watched, + // then we recommend that, instead of changing the default period, the + // controller requeue, with a constant duration `t`, whenever the controller + // is "done" with an object, and would otherwise not requeue it, i.e., we + // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`, + // instead of `reconcile.Result{}`. + SyncPeriod *time.Duration + +} +``` + + +## Example usages + +### Cache ConfigMaps in the `public` and `kube-system` namespaces and Secrets in the `operator` Namespace + + +``` +cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + &corev1.ConfigMap{}: { + Namespaces: map[string]cache.Config{ + "public": {}, + "kube-system": {}, + }, + }, + &corev1.Secret{}: {Namespaces: map[string]Config{ + "operator": {}, + }}, + }, +} +``` + +### Cache ConfigMaps in all namespaces without selector, but have a selector for the `operator` Namespace + +``` +cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + &corev1.ConfigMap{}: { + Namespaces: map[string]cache.Config{ + cache.AllNamespaces: nil, // No selector for all namespaces... 
+				"operator": {LabelSelector: labelSelector}, // except for the operator namespace
+			},
+		},
+	},
+}
+```
+
+
+### Only cache the `operator` namespace for namespaced objects and all namespaces for Deployments
+
+```
+cache.Options{
+	ByObject: map[client.Object]cache.ByObject{
+		&appsv1.Deployment{}: {Namespaces: map[string]cache.Config{
+			cache.AllNamespaces: {},
+		}},
+	},
+	DefaultNamespaces: map[string]cache.Config{
+		"operator": {},
+	},
+}
+```
+
+### Use a LabelSelector for everything except Nodes
+
+```
+cache.Options{
+	ByObject: map[client.Object]cache.ByObject{
+		&corev1.Node{}: {LabelSelector: labels.Everything()},
+	},
+	DefaultLabelSelector: myLabelSelector,
+}
+```
+
+### Only cache namespaced objects in the `foo` and `bar` namespace
+
+```
+cache.Options{
+	DefaultNamespaces: map[string]cache.Config{
+		"foo": {},
+		"bar": {},
+	},
+}
+```
diff --git a/designs/component-config.md b/designs/component-config.md
new file mode 100644
index 0000000000..8aebec4f96
--- /dev/null
+++ b/designs/component-config.md
@@ -0,0 +1,320 @@
+# ComponentConfig Controller Runtime Support
+Author: @christopherhein
+
+Last Updated on: 03/02/2020
+
+## Table of Contents
+
+
+   * [ComponentConfig Controller Runtime Support](#componentconfig-controller-runtime-support)
+      * [Table of Contents](#table-of-contents)
+      * [Summary](#summary)
+      * [Motivation](#motivation)
+         * [Links to Open Issues](#links-to-open-issues)
+         * [Goals](#goals)
+         * [Non-Goals/Future Work](#non-goalsfuture-work)
+      * [Proposal](#proposal)
+         * [ComponentConfig Load Order](#componentconfig-load-order)
+         * [Embeddable ComponentConfig Type](#embeddable-componentconfig-type)
+         * [Default ComponentConfig Type](#default-componentconfig-type)
+         * [Using Flags w/ ComponentConfig](#using-flags-w-componentconfig)
+         * [Kubebuilder Scaffolding Example](#kubebuilder-scaffolding-example)
+      * [User Stories](#user-stories)
+         * [Controller Author with controller-runtime and default 
type](#controller-author-with-controller-runtime-and-default-type) + * [Controller Author with controller-runtime and custom type](#controller-author-with-controller-runtime-and-custom-type) + * [Controller Author with kubebuilder (tbd proposal for kubebuilder)](#controller-author-with-kubebuilder-tbd-proposal-for-kubebuilder) + * [Controller User without modifications to config](#controller-user-without-modifications-to-config) + * [Controller User with modifications to config](#controller-user-with-modifications-to-config) + * [Risks and Mitigations](#risks-and-mitigations) + * [Alternatives](#alternatives) + * [Implementation History](#implementation-history) + + + +## Summary + +Currently controllers that use `controller-runtime` need to configure the `ctrl.Manager` by using flags or hardcoding values into the initialization methods. Core Kubernetes has started to move away from using flags as a mechanism for configuring components and standardized on [`ComponentConfig` or Versioned Component Configuration Files](https://docs.google.com/document/d/1FdaEJUEh091qf5B98HM6_8MS764iXrxxigNIdwHYW9c/edit). This proposal is to bring `ComponentConfig` to `controller-runtime` to allow controller authors to make `go` types backed by `apimachinery` to unmarshal and configure the `ctrl.Manager{}` reducing the flags and allowing code based tools to easily configure controllers instead of requiring them to mutate CLI args. 
+
+## Motivation
+
+This change is important because:
+- it will help make it easier for controllers to be configured by other machine processes
+- it will reduce the number of flags required to start a controller
+- allow for configuration types which aren't natively supported by flags
+- allow using and upgrading older configurations avoiding breaking changes in flags
+
+### Links to Open Issues
+
+- [#518 Provide a ComponentConfig to tweak the Manager](https://github.com/kubernetes-sigs/controller-runtime/issues/518)
+- [#207 Reduce command line flag boilerplate](https://github.com/kubernetes-sigs/controller-runtime/issues/207)
+- [#722 Implement ComponentConfig by default & stop using (most) flags](https://github.com/kubernetes-sigs/kubebuilder/issues/722)
+
+### Goals
+
+- Provide an interface for pulling configuration data out of exposed `ComponentConfig` types (see below for implementation)
+- Provide a new `ctrl.NewFromComponentConfig()` function for initializing a manager
+- Provide an embeddable `ControllerManagerConfiguration` type for easily authoring `ComponentConfig` types
+- Provide a `DefaultControllerConfig` to make the switching easier for clients
+
+### Non-Goals/Future Work
+
+- `kubebuilder` implementation and design in another PR
+- Changing the default `controller-runtime` implementation
+- Dynamically reloading `ComponentConfig` object
+- Providing `flags` interface and overrides
+
+## Proposal
+
+The `ctrl.Manager` _SHOULD_ support loading configurations from `ComponentConfig`-like objects.
+An interface for that object with getters for the specific configuration parameters is created to bridge existing patterns.
+
+Without breaking the current `ctrl.NewManager` which uses an exported `ctrl.Options{}`, the `manager.go` can expose a new func, `NewFromComponentConfig()`; this would be able to loop through the getters to populate an internal `ctrl.Options{}` and pass that into `New()`.
+
+```golang
+//pkg/manager/manager.go
+
+// ManagerConfiguration defines what the ComponentConfig object for ControllerRuntime needs to support
+type ManagerConfiguration interface {
+	GetSyncPeriod() *time.Duration
+
+	GetLeaderElection() bool
+	GetLeaderElectionNamespace() string
+	GetLeaderElectionID() string
+
+	GetLeaseDuration() *time.Duration
+	GetRenewDeadline() *time.Duration
+	GetRetryPeriod() *time.Duration
+
+	GetNamespace() string
+	GetMetricsBindAddress() string
+	GetHealthProbeBindAddress() string
+
+	GetReadinessEndpointName() string
+	GetLivenessEndpointName() string
+
+	GetPort() int
+	GetHost() string
+
+	GetCertDir() string
+}
+
+func NewFromComponentConfig(config *rest.Config, scheme *runtime.Scheme, filename string, managerconfig ManagerConfiguration) (Manager, error) {
+	codecs := serializer.NewCodecFactory(scheme)
+	if err := decodeComponentConfigFileInto(codecs, filename, managerconfig); err != nil {
+		return nil, err
+	}
+	options := Options{}
+
+	if scheme != nil {
+		options.Scheme = scheme
+	}
+
+	// Loop through getters
+	if managerconfig.GetLeaderElection() {
+		options.LeaderElection = managerconfig.GetLeaderElection()
+	}
+	// ...
+
+	return New(config, options)
+}
+```
+
+#### ComponentConfig Load Order
+
+![ComponentConfig Load Order](/designs/images/component-config-load.png)
+
+#### Embeddable ComponentConfig Type
+
+To make this easier for Controller authors `controller-runtime` can expose a `config.ControllerConfiguration` type that can be embedded similar to the way that `k8s.io/apimachinery/pkg/apis/meta/v1` works for `TypeMeta` and `ObjectMeta`; these could live in `pkg/api/config/v1alpha1/types.go`. See the `DefaultComponentConfig` type below for an example implementation.
+
+```golang
+// pkg/api/config/v1alpha1/types.go
+package v1alpha1
+
+import (
+	"time"
+
+	configv1alpha1 "k8s.io/component-base/config/v1alpha1"
+)
+
+// ControllerManagerConfiguration defines the embedded RuntimeConfiguration for controller-runtime clients.
+type ControllerManagerConfiguration struct { + Namespace string `json:"namespace,omitempty"` + + SyncPeriod *time.Duration `json:"syncPeriod,omitempty"` + + LeaderElection configv1alpha1.LeaderElectionConfiguration `json:"leaderElection,omitempty"` + + MetricsBindAddress string `json:"metricsBindAddress,omitempty"` + + Health ControllerManagerConfigurationHealth `json:"health,omitempty"` + + Port *int `json:"port,omitempty"` + Host string `json:"host,omitempty"` + + CertDir string `json:"certDir,omitempty"` +} + +// ControllerManagerConfigurationHealth defines the health configs +type ControllerManagerConfigurationHealth struct { + HealthProbeBindAddress string `json:"healthProbeBindAddress,omitempty"` + + ReadinessEndpointName string `json:"readinessEndpointName,omitempty"` + LivenessEndpointName string `json:"livenessEndpointName,omitempty"` +} +``` + + + +#### Default ComponentConfig Type + +To enable `controller-runtime` to have a default `ComponentConfig` struct which can be used instead of requiring each controller or extension to build its own `ComponentConfig` type, we can create a `DefaultControllerConfiguration` type which can exist in `pkg/api/config/v1alpha1/types.go`. This will allow the controller authors to use this before needing to implement their own type with additional configs. + +```golang +// pkg/api/config/v1alpha1/types.go +package v1alpha1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + configv1alpha1 "sigs.k8s.io/controller-runtime/pkg/apis/config/v1alpha1" +) + +// DefaultControllerManagerConfiguration is the Schema for the DefaultControllerManagerConfigurations API +type DefaultControllerManagerConfiguration struct { + metav1.TypeMeta `json:",inline"` + + Spec configv1alpha1.ControllerManagerConfiguration `json:"spec,omitempty"` +} +``` + +This would allow a controller author to use this struct with any config that supports the json/yaml structure. 
For example a controller author could define their `Kind` as `FoobarControllerConfiguration` and have it defined as the following. + +```yaml +# config.yaml +apiVersion: config.somedomain.io/v1alpha1 +kind: FoobarControllerManagerConfiguration +spec: + port: 9443 + metricsBindAddress: ":8080" + leaderElection: + leaderElect: false +``` + +Given the following config and `DefaultControllerManagerConfiguration` we'd be able to initialize the controller using the following. + + +```golang +mgr, err := ctrl.NewManagerFromComponentConfig(ctrl.GetConfigOrDie(), scheme, configname, &defaultv1alpha1.DefaultControllerManagerConfiguration{}) +if err != nil { + // ... +} +``` + +The above example uses `configname` which is the name of the file to load the configuration from and uses `scheme` to get the specific serializer, eg `serializer.NewCodecFactory(scheme)`. This will allow the configuration to be unmarshalled into the `runtime.Object` type and passed into the +`ctrl.NewManagerFromComponentConfig()` as a `ManagerConfiguration` interface. + +#### Using Flags w/ ComponentConfig + +Since this design still requires setting up the initial `ComponentConfig` type and passing in a pointer to `ctrl.NewFromComponentConfig()` if you want to allow for the use of flags, your controller can use any of the different flagging interfaces. eg [`flag`](https://golang.org/pkg/flag/), [`pflag`](https://pkg.go.dev/github.com/spf13/pflag), [`flagnum`](https://pkg.go.dev/github.com/luci/luci-go/common/flag/flagenum) and set values on the `ComponentConfig` type prior to passing the pointer into the `ctrl.NewFromComponentConfig()`, example below. 
+
+```golang
+leaderElect := true
+
+config := &defaultv1alpha1.DefaultControllerManagerConfiguration{
+	Spec: configv1alpha1.ControllerManagerConfiguration{
+		LeaderElection: configv1alpha1.LeaderElectionConfiguration{
+			LeaderElect: &leaderElect,
+		},
+	},
+}
+mgr, err := ctrl.NewManagerFromComponentConfig(ctrl.GetConfigOrDie(), scheme, configname, config)
+if err != nil {
+	// ...
+}
+```
+
+#### Kubebuilder Scaffolding Example
+
+When expanded in a separate design _(link once created)_, this will allow controller authors to generate a type that implements the `ManagerConfiguration` interface. The following is a sample of what this looks like:
+
+```golang
+package config
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	configv1alpha1 "sigs.k8s.io/controller-runtime/pkg/apis/config/v1alpha1"
+)
+
+type ControllerNameConfigurationSpec struct {
+	configv1alpha1.ControllerManagerConfiguration `json:",inline"`
+}
+
+type ControllerNameConfiguration struct {
+	metav1.TypeMeta
+
+	Spec ControllerNameConfigurationSpec `json:"spec"`
+}
+```
+
+Usage of this custom `ComponentConfig` type would then require changing the `ctrl.NewFromComponentConfig()` to use the new struct vs the `DefaultControllerManagerConfiguration`.
+ +## User Stories + +### Controller Author with `controller-runtime` and default type + +- Mount `ConfigMap` +- Initialize `ctrl.Manager` with `NewFromComponentConfig` with config name and `DefaultControllerManagerConfiguration` type +- Build custom controller as usual + +### Controller Author with `controller-runtime` and custom type + +- Implement `ComponentConfig` type +- Embed `ControllerManagerConfiguration` type +- Mount `ConfigMap` +- Initialize `ctrl.Manager` with `NewFromComponentConfig` with config name and `ComponentConfig` type +- Build custom controller as usual + +### Controller Author with `kubebuilder` (tbd proposal for `kubebuilder`) + +- Initialize `kubebuilder` project using `--component-config-name=XYZConfiguration` +- Build custom controller as usual + +### Controller User without modifications to config + +_Provided that the controller provides manifests_ + +- Apply the controller to the cluster +- Deploy custom resources + +### Controller User with modifications to config + +- _Following from previous example without changes_ +- Create a new `ConfigMap` for changes +- Modify the `controller-runtime` pod to use the new `ConfigMap` +- Apply the controller to the cluster +- Deploy custom resources + + +## Risks and Mitigations + +- Given that this isn't changing the core Manager initialization for `controller-runtime` it's fairly low risk + +## Alternatives + +* `NewFromComponentConfig()` could load the object from disk based on the file name and hydrate the `ComponentConfig` type. 
+ +## Implementation History + +- [x] 02/19/2020: Proposed idea in an issue or [community meeting] +- [x] 02/24/2020: Proposal submitted to `controller-runtime` +- [x] 03/02/2020: Updated with default `DefaultControllerManagerConfiguration` +- [x] 03/04/2020: Updated with embeddable `RuntimeConfig` +- [x] 03/10/2020: Updated embeddable name to `ControllerManagerConfiguration` + + + +[community meeting]: https://docs.google.com/document/d/1Ih-2cgg1bUrLwLVTB9tADlPcVdgnuMNBGbUl4D-0TIk diff --git a/designs/images/component-config-load.png b/designs/images/component-config-load.png new file mode 100644 index 0000000000..04619779da Binary files /dev/null and b/designs/images/component-config-load.png differ diff --git a/designs/move-cluster-specific-code-out-of-manager.md b/designs/move-cluster-specific-code-out-of-manager.md new file mode 100644 index 0000000000..67b7a419a5 --- /dev/null +++ b/designs/move-cluster-specific-code-out-of-manager.md @@ -0,0 +1,228 @@ +Move cluster-specific code out of the manager +=================== + +## Motivation + +Today, it is already possible to use controller-runtime to build controllers that act on +more than one cluster. However, this is undocumented and not straight-forward, requiring +users to look into the implementation details to figure out how to make this work. 
+ +## Goals + +* Provide an easy-to-discover way to build controllers that act on multiple clusters +* Decouple the management of `Runnables` from the construction of "things that require a kubeconfig" +* Do not introduce changes for users that build controllers that act on one cluster only + +## Non-Goals + +## Proposal + +Currently, the `./pkg/manager.Manager` has two purposes: + +* Handle running controllers/other runnables and managing their lifecycle +* Setting up various things to interact with the Kubernetes cluster, + for example a `Client` and a `Cache` + +This works very well when building controllers that talk to a single cluster, +however some use-cases require controllers that interact with more than +one cluster. This multi-cluster usecase is very awkward today, because it +requires to construct one manager per cluster and adding all subsequent +managers to the first one. + +This document proposes to move all cluster-specific code out of the manager +and into a new package and interface, that then gets embedded into the manager. +This allows to keep the usage for single-cluster cases the same and introduce +this change in a backwards-compatible manner. + +Furthermore, the manager gets extended to start all caches before any other +`runnables` are started. + + +The new `Cluster` interface will look like this: + +```go +type Cluster interface { + // SetFields will set cluster-specific dependencies on an object for which the object has implemented the inject + // interface, specifically inject.Client, inject.Cache, inject.Scheme, inject.Config and inject.APIReader + SetFields(interface{}) error + + // GetConfig returns an initialized Config + GetConfig() *rest.Config + + // GetClient returns a client configured with the Config. This client may + // not be a fully "direct" client -- it may read from a cache, for + // instance. See Options.NewClient for more information on how the default + // implementation works. 
+	GetClient() client.Client
+
+	// GetFieldIndexer returns a client.FieldIndexer configured with the client
+	GetFieldIndexer() client.FieldIndexer
+
+	// GetCache returns a cache.Cache
+	GetCache() cache.Cache
+
+	// GetEventRecorderFor returns a new EventRecorder for the provided name
+	GetEventRecorderFor(name string) record.EventRecorder
+
+	// GetRESTMapper returns a RESTMapper
+	GetRESTMapper() meta.RESTMapper
+
+	// GetAPIReader returns a reader that will be configured to use the API server.
+	// This should be used sparingly and only when the client does not fit your
+	// use case.
+	GetAPIReader() client.Reader
+
+	// GetScheme returns an initialized Scheme
+	GetScheme() *runtime.Scheme
+
+	// Start starts the connection to the Cluster
+	Start(<-chan struct{}) error
+}
+```
+
+And the current `Manager` interface will change to look like this:
+
+```go
+type Manager interface {
+	// Cluster holds objects to connect to a cluster
+	cluster.Cluster
+
+	// Add will set requested dependencies on the component, and cause the component to be
+	// started when Start is called. Add will inject any dependencies for which the argument
+	// implements the inject interface - e.g. inject.Client.
+	// Depending on if a Runnable implements LeaderElectionRunnable interface, a Runnable can be run in either
+	// non-leaderelection mode (always running) or leader election mode (managed by leader election if enabled).
+	Add(Runnable) error
+
+	// Elected is closed when this manager is elected leader of a group of
+	// managers, either because it won a leader election or because no leader
+	// election was configured.
+	Elected() <-chan struct{}
+
+	// SetFields will set any dependencies on an object for which the object has implemented the inject
+	// interface - e.g. inject.Client.
+	SetFields(interface{}) error
+
+	// AddMetricsExtraHandler adds an extra handler served on path to the http server that serves metrics.
+	// Might be useful to register some diagnostic endpoints e.g. pprof. Note that these endpoints are meant to be
+	// sensitive and shouldn't be exposed publicly.
+	// If the simple path -> handler mapping offered here is not enough, a new http server/listener should be added as
+	// Runnable to the manager via Add method.
+	AddMetricsExtraHandler(path string, handler http.Handler) error
+
+	// AddHealthzCheck allows you to add Healthz checker
+	AddHealthzCheck(name string, check healthz.Checker) error
+
+	// AddReadyzCheck allows you to add Readyz checker
+	AddReadyzCheck(name string, check healthz.Checker) error
+
+	// Start starts all registered Controllers and blocks until the Stop channel is closed.
+	// Returns an error if there is an error starting any controller.
+	// If LeaderElection is used, the binary must be exited immediately after this returns,
+	// otherwise components that need leader election might continue to run after the leader
+	// lock was lost.
+	Start(<-chan struct{}) error
+
+	// GetWebhookServer returns a webhook.Server
+	GetWebhookServer() *webhook.Server
+}
+```
+
+Furthermore, during startup, the `Manager` will use type assertion to find `Cluster`s
+to be able to start their caches before anything else:
+
+```go
+type HasCaches interface {
+	GetCache() cache.Cache
+}
+if getter, hasCaches := runnable.(HasCaches); hasCaches {
+	m.caches = append(m.caches, getter.GetCache())
+}
+```
+
+```go
+for idx := range cm.caches {
+	go func(idx int) { cm.caches[idx].Start(cm.internalStop) }(idx)
+}
+
+for _, cache := range cm.caches {
+	cache.WaitForCacheSync(cm.internalStop)
+}
+
+// Start all other runnables
+```
+
+## Example
+
+Below is a sample `reconciler` that will create a secret in a `mirrorCluster` for each
+secret found in `referenceCluster` if none of that name already exists. To keep the sample
+short, it won't compare the contents of the secrets.
+
+```go
+type secretMirrorReconciler struct {
+	referenceClusterClient, mirrorClusterClient client.Client
+}
+
+func (r *secretMirrorReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
+	s := &corev1.Secret{}
+	if err := r.referenceClusterClient.Get(context.TODO(), req.NamespacedName, s); err != nil {
+		if kerrors.IsNotFound(err) { return reconcile.Result{}, nil }
+		return reconcile.Result{}, err
+	}
+
+	if err := r.mirrorClusterClient.Get(context.TODO(), req.NamespacedName, &corev1.Secret{}); err != nil {
+		if !kerrors.IsNotFound(err) {
+			return reconcile.Result{}, err
+		}
+
+		mirrorSecret := &corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{Namespace: s.Namespace, Name: s.Name},
+			Data: s.Data,
+		}
+		return reconcile.Result{}, r.mirrorClusterClient.Create(context.TODO(), mirrorSecret)
+	}
+
+	return reconcile.Result{}, nil
+}
+
+func NewSecretMirrorReconciler(mgr manager.Manager, mirrorCluster cluster.Cluster) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		// Watch Secrets in the reference cluster
+		For(&corev1.Secret{}).
+		// Watch Secrets in the mirror cluster
+		Watches(
+			source.NewKindWithCache(&corev1.Secret{}, mirrorCluster.GetCache()),
+			&handler.EnqueueRequestForObject{},
+		).
+		Complete(&secretMirrorReconciler{
+			referenceClusterClient: mgr.GetClient(),
+			mirrorClusterClient: mirrorCluster.GetClient(),
+		})
+
+}
+
+func main() {
+
+	mgr, err := manager.New(cfg1, manager.Options{})
+	if err != nil {
+		panic(err)
+	}
+
+	mirrorCluster, err := cluster.New(cfg2)
+	if err != nil {
+		panic(err)
+	}
+
+	if err := mgr.Add(mirrorCluster); err != nil {
+		panic(err)
+	}
+
+	if err := NewSecretMirrorReconciler(mgr, mirrorCluster); err != nil {
+		panic(err)
+	}
+
+	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
+		panic(err)
+	}
+}
+```
diff --git a/designs/priorityqueue.md b/designs/priorityqueue.md
new file mode 100644
index 0000000000..ef1f7588a6
--- /dev/null
+++ b/designs/priorityqueue.md
@@ -0,0 +1,110 @@
+Priority Queue
+===================
+
+This document describes the motivation behind implementing a priority queue
+in controller-runtime and its design details.
+
+## Motivation
+
+1. Controllers reconcile all objects during startup to account for changes in
+   the reconciliation logic. Some controllers also periodically re-reconcile
+   everything to account for out of band changes they do not get notified for,
+   this is for example common for controllers managing cloud resources. In both
+   these cases, the reconciliation of new or changed objects gets delayed,
+   resulting in poor user experience. [Example][0]
+2. There may be application-specific reasons why some events are more important
+   than others, [Example][1]
+
+## Proposed changes
+
+Implement a priority queue in controller-runtime that exposes the following
+interface:
+
+```go
+type PriorityQueue[T comparable] interface {
+	// AddWithOpts adds one or more items to the workqueue. Items
+	// in the workqueue are de-duplicated, so there will only ever
+	// be one entry for a given key.
+	// Adding an item that is already there may update its wait
+	// period to the lowest of existing and new wait period or
+	// its priority to the highest of existing and new priority.
+ AddWithOpts(o AddOpts, items ...T) + + // GetWithPriority returns an item and its priority. It allows + // a controller to re-use the priority if it enqueues an item + // again. + GetWithPriority() (item T, priority int, shutdown bool) + + // workqueue.TypedRateLimitingInterface is kept for backwards + // compatibility. + workqueue.TypedRateLimitingInterface[T] +} + +type AddOpts struct { + // After is a duration after which the object will be available for + // reconciliation. If the object is already in the workqueue, the + // lowest of existing and new After period will be used. + After time.Duration + + // Ratelimited specifies if the ratelimiter should be used to + // determine a wait period. If the object is already in the + // workqueue, the lowest of existing and new wait period will be + // used. + RateLimited bool + + // Priority specifies the priority of the object. Objects with higher + // priority are returned before objects with lower priority. If the + // object is already in the workqueue, the priority will be updated + // to the highest of existing and new priority. + // + // The default value is 0. + Priority int +} +``` + +In order to fix the issue described in point one of the motivation section, +we have to be able to differentiate events stemming from the initial list +during startup and from resyncs from other events. For events from the initial +list, the informer emits a `Create` event whereas for `Resync` it emits an `Update` +event. The suggestion is to use a heuristic for `Create` events, if the object +in there is older than one minute, it is assumed to be from the initial `List`. +For the `Resync`, we simply check if the `ResourceVersion` is unchanged. +In both these cases, we will lower the priority to `LowPriority`/`-100`. +This gives some room for use-cases where people want to use a priority that +is lower than default (`0`) but higher than what we use in the wrapper. 
+ +```go +// WithLowPriorityWhenUnchanged wraps an existing handler and will +// reduce the priority of events stemming from the initial listwatch +// or cache resyncs to LowPriority. +func WithLowPriorityWhenUnchanged[object client.Object, request comparable](u TypedEventHandler[object, request]) TypedEventHandler[object, request]{ +} +``` + +```go +// LowPriority is the priority set by WithLowPriorityWhenUnchanged +const LowPriority = -100 +``` + +The issue described in point two of the motivation section ("application-specific +reasons to prioritize some events") will always require implementation of a custom +handler or eventsource in order to inject the appropriate priority. + +## Implementation stages + +In order to safely roll this out to all controller-runtime users, it is suggested to +divide the implementation into two stages: Initially, we will add the priority queue +but mark it as experimental and all usage of it requires explicit opt-in by setting +a boolean on the manager or configuring `NewQueue` in a controllers opts. There will +be no breaking changes required for this, but sources or handlers that want to make +use of the new queue will have to use type assertions. + +After we've gained some confidence that the implementation is useful and correct, we +will make it the default. Doing so entails breaking the `source.Source` and the +`handler.Handler` interfaces as well as the `controller.Options` struct to refer to +the new workqueue interface. We will wait at least one minor release after introducing +the `PriorityQueue` before doing this. 
+ + +* [0]: https://youtu.be/AYNaaXlV8LQ?si=i2Pfo7Ske6rTrPLS +* [1]: https://github.com/cilium/cilium/blob/a17d6945b29c177209af3d985bd82cce49eed4a1/operator/pkg/ciliumendpointslice/controller.go#L73 diff --git a/designs/template.md b/designs/template.md new file mode 100644 index 0000000000..c04d6e0c71 --- /dev/null +++ b/designs/template.md @@ -0,0 +1,21 @@ +Title of the Design +=================== + + + +## Example + + \ No newline at end of file diff --git a/designs/use-selectors-at-cache.md b/designs/use-selectors-at-cache.md new file mode 100644 index 0000000000..1d7ec6ecfb --- /dev/null +++ b/designs/use-selectors-at-cache.md @@ -0,0 +1,122 @@ +# Filter cache ListWatch using selectors + +## Motivation + +Controller-Runtime controllers use a cache to subscribe to events from +Kubernetes objects and to read those objects more efficiently by avoiding +to call out to the API. This cache is backed by Kubernetes informers. + +The only way to filter this cache is by namespace and resource type. +In cases where a controller is only interested in a small subset of objects +(for example all pods on a node), this might end up not being efficient enough. + +Requests to a client backed by a filtered cache for objects that do not match +the filter will never return anything, so we need to make sure that we properly +warn users to only use this when they are sure they know what they are doing. + +This proposal sidesteps the issue of "How to we plug this into the cache-backed +client so that users get feedback when they request something that is +not matching the caches filter" by only implementing the filter logic in the +cache package. This allows advanced users to combine a filtered cache with the +already existing `NewCacheFunc` option in the manager and cluster package, +while simultaneously hiding it from newer users that might not be aware of the +implications and the associated foot-shoot potential. 
+ +The only alternative today to get a filtered cache with controller-runtime is +to build it out-of tree. Because such a cache would mostly copy the existing +cache and add just some options, this is not great for consumers. + +This proposal is related to the following issue [2] + +## Proposal + +Add a new selector code at `pkg/cache/internal/selector.go` with common structs +and helpers + +```golang +package internal + +... + +// SelectorsByObject associate a runtime.Object to a field/label selector +type SelectorsByObject map[client.Object]Selector + +// SelectorsByGVK associate a GroupVersionResource to a field/label selector +type SelectorsByGVK map[schema.GroupVersionKind]Selector + +// Selector specify the label/field selector to fill in ListOptions +type Selector struct { + Label labels.Selector + Field fields.Selector +} + +// ApplyToList fill in ListOptions LabelSelector and FieldSelector if needed +func (s Selector) ApplyToList(listOpts *metav1.ListOptions) { +... +} +``` + +Add a type alias to `pkg/cache/cache.go` to internal + +```golang +type SelectorsByObject internal.SelectorsByObject +``` + +Extend `cache.Options` as follows: + +```golang +type Options struct { + Scheme *runtime.Scheme + Mapper meta.RESTMapper + Resync *time.Duration + Namespace string + SelectorsByObject SelectorsByObject +} +``` + +Add new builder function that will return a cache constructor using the passed +cache.Options, users can set SelectorsByObject there to filter out cache, it +will convert SelectorByObject to SelectorsByGVK + +```golang +func BuilderWithOptions(options cache.Options) NewCacheFunc { +... +} +``` + +is passed to informer's ListWatch and add the filtering option: + +```golang + +# At pkg/cache/internal/informers_map.go + +ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.selectors[gvk].ApplyToList(&opts) +... 
+```
+
+Here is a PR with the implementation at the `pkg/cache` part [3]
+
+## Example
+
+User will override `NewCache` function to make clear that they know exactly the
+implications of using a different cache than the default one
+
+```golang
+  ctrl.Options.NewCache = cache.BuilderWithOptions(cache.Options{
+      SelectorsByObject: cache.SelectorsByObject{
+          &corev1.Node{}: {
+              Field: fields.SelectorFromSet(fields.Set{"metadata.name": "node01"}),
+          },
+          &v1beta1.NodeNetworkState{}: {
+              Field: fields.SelectorFromSet(fields.Set{"metadata.name": "node01"}),
+              Label: labels.SelectorFromSet(labels.Set{"app": "kubernetes-nmstate"}),
+          },
+      },
+  },
+  )
+```
+
+[1] https://github.com/nmstate/kubernetes-nmstate/pull/687
+[2] https://github.com/kubernetes-sigs/controller-runtime/issues/244
+[3] https://github.com/kubernetes-sigs/controller-runtime/pull/1404
diff --git a/doc.go b/doc.go
index 16eef52f8e..75d1d908c5 100644
--- a/doc.go
+++ b/doc.go
@@ -23,13 +23,14 @@ limitations under the License.
 // and uncommon cases should be possible. In general, controller-runtime tries
 // to guide users towards Kubernetes controller best-practices.
 //
-// Getting Started
+// # Getting Started
 //
 // The main entrypoint for controller-runtime is this root package, which
 // contains all of the common types needed to get started building controllers:
-// import (
-//	controllers "sigs.k8s.io/controller-runtime"
-// )
+//
+//	import (
+//		ctrl "sigs.k8s.io/controller-runtime"
+//	)
 //
 // The examples in this package walk through a basic controller setup. The
 // kubebuilder book (https://book.kubebuilder.io) has some more in-depth
@@ -38,36 +39,36 @@ limitations under the License.
 // controller-runtime favors structs with sane defaults over constructors, so
 // it's fairly common to see structs being used directly in controller-runtime.
 //
-// Organization
+// # Organization
 //
 // A brief-ish walkthrough of the layout of this library can be found below. 
Each // package contains more information about how to use it. // // Frequently asked questions about using controller-runtime and designing // controllers can be found at -// https://github.com/kubernetes-sigs/controller-runtime/blob/master/FAQ.md. +// https://github.com/kubernetes-sigs/controller-runtime/blob/main/FAQ.md. // -// Managers +// # Managers // // Every controller and webhook is ultimately run by a Manager (pkg/manager). A // manager is responsible for running controllers and webhooks, and setting up -// common dependencies (pkg/runtime/inject), like shared caches and clients, as +// common dependencies, like shared caches and clients, as // well as managing leader election (pkg/leaderelection). Managers are // generally configured to gracefully shut down controllers on pod termination // by wiring up a signal handler (pkg/manager/signals). // -// Controllers +// # Controllers // -// Controllers (pkg/controller) use events (pkg/events) to eventually trigger +// Controllers (pkg/controller) use events (pkg/event) to eventually trigger // reconcile requests. They may be constructed manually, but are often // constructed with a Builder (pkg/builder), which eases the wiring of event // sources (pkg/source), like Kubernetes API object changes, to event handlers // (pkg/handler), like "enqueue a reconcile request for the object owner". // Predicates (pkg/predicate) can be used to filter which events actually -// trigger reconciles. There are pre-written utilies for the common cases, and +// trigger reconciles. There are pre-written utilities for the common cases, and // interfaces and helpers for advanced cases. // -// Reconcilers +// # Reconcilers // // Controller logic is implemented in terms of Reconcilers (pkg/reconcile). A // Reconciler implements a function which takes a reconcile Request containing @@ -75,36 +76,39 @@ limitations under the License. 
// and returns a Response or an error indicating whether to requeue for a // second round of processing. // -// Clients and Caches +// # Clients and Caches // // Reconcilers use Clients (pkg/client) to access API objects. The default // client provided by the manager reads from a local shared cache (pkg/cache) // and writes directly to the API server, but clients can be constructed that // only talk to the API server, without a cache. The Cache will auto-populate // with watched objects, as well as when other structured objects are -// requested. Caches may also have indexes, which can be created via a -// FieldIndexer (pkg/client) obtained from the manager. Indexes can used to +// requested. The default split client does not promise to invalidate the cache +// during writes (nor does it promise sequential create/get coherence), and code +// should not assume a get immediately following a create/update will return +// the updated resource. Caches may also have indexes, which can be created via +// a FieldIndexer (pkg/client) obtained from the manager. Indexes can be used to // quickly and easily look up all objects with certain fields set. Reconcilers // may retrieve event recorders (pkg/recorder) to emit events using the // manager. // -// Schemes +// # Schemes // // Clients, Caches, and many other things in Kubernetes use Schemes // (pkg/scheme) to associate Go types to Kubernetes API Kinds // (Group-Version-Kinds, to be specific). // -// Webhooks +// # Webhooks // // Similarly, webhooks (pkg/webhook/admission) may be implemented directly, but // are often constructed using a builder (pkg/webhook/admission/builder). They // are run via a server (pkg/webhook) which is managed by a Manager. // -// Logging and Metrics +// # Logging and Metrics // // Logging (pkg/log) in controller-runtime is done via structured logs, using a // log set of interfaces called logr -// (https://godoc.org/github.com/go-logr/logr). 
While controller-runtime +// (https://pkg.go.dev/github.com/go-logr/logr). While controller-runtime // provides easy setup for using Zap (https://go.uber.org/zap, pkg/log/zap), // you can provide any implementation of logr as the base logger for // controller-runtime. @@ -114,7 +118,7 @@ limitations under the License. // serve these by an HTTP endpoint, and additional metrics may be registered to // this Registry as normal. // -// Testing +// # Testing // // You can easily build integration and unit tests for your controllers and // webhooks using the test Environment (pkg/envtest). This will automatically diff --git a/example_test.go b/example_test.go index 8a6f37eabc..cbbf032b0f 100644 --- a/example_test.go +++ b/example_test.go @@ -18,14 +18,24 @@ package controllerruntime_test import ( "context" + "encoding/json" "fmt" "os" "time" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - controllers "sigs.k8s.io/controller-runtime" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + // since we invoke tests with -ginkgo.junit-report we need to import ginkgo. + _ "github.com/onsi/ginkgo/v2" ) // This example creates a simple application Controller that is configured for ReplicaSets and Pods. @@ -34,17 +44,16 @@ import ( // ReplicaSetReconciler. // // * Start the application. 
-// TODO(pwittrock): Update this example when we have better dependency injection support func Example() { - var log = controllers.Log.WithName("builder-examples") + log := ctrl.Log.WithName("builder-examples") - manager, err := controllers.NewManager(controllers.GetConfigOrDie(), controllers.Options{}) + manager, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{}) if err != nil { log.Error(err, "could not create manager") os.Exit(1) } - err = controllers. + err = ctrl. NewControllerManagedBy(manager). // Create the Controller For(&appsv1.ReplicaSet{}). // ReplicaSet is the Application API Owns(&corev1.Pod{}). // ReplicaSet owns Pods created by it @@ -54,7 +63,95 @@ func Example() { os.Exit(1) } - if err := manager.Start(controllers.SetupSignalHandler()); err != nil { + if err := manager.Start(ctrl.SetupSignalHandler()); err != nil { + log.Error(err, "could not start manager") + os.Exit(1) + } +} + +type ExampleCRDWithConfigMapRef struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + ConfigMapRef corev1.LocalObjectReference `json:"configMapRef"` +} + +func deepCopyObject(arg any) runtime.Object { + // DO NOT use this code in production code, this is only for presentation purposes. + // in real code you should generate DeepCopy methods by using controller-gen CLI tool. + argBytes, err := json.Marshal(arg) + if err != nil { + panic(err) + } + out := &ExampleCRDWithConfigMapRefList{} + if err := json.Unmarshal(argBytes, out); err != nil { + panic(err) + } + return out +} + +// DeepCopyObject implements client.Object. +func (in *ExampleCRDWithConfigMapRef) DeepCopyObject() runtime.Object { + return deepCopyObject(in) +} + +type ExampleCRDWithConfigMapRefList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExampleCRDWithConfigMapRef `json:"items"` +} + +// DeepCopyObject implements client.ObjectList. 
+func (in *ExampleCRDWithConfigMapRefList) DeepCopyObject() runtime.Object { + return deepCopyObject(in) +} + +// This example creates a simple application Controller that is configured for ExampleCRDWithConfigMapRef CRD. +// Any change in the configMap referenced in this Custom Resource will cause the re-reconcile of the parent ExampleCRDWithConfigMapRef +// due to the implementation of the .Watches method of "sigs.k8s.io/controller-runtime/pkg/builder".Builder. +func Example_customHandler() { + log := ctrl.Log.WithName("builder-examples") + + manager, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{}) + if err != nil { + log.Error(err, "could not create manager") + os.Exit(1) + } + + err = ctrl. + NewControllerManagedBy(manager). + For(&ExampleCRDWithConfigMapRef{}). + Watches(&corev1.ConfigMap{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, cm client.Object) []ctrl.Request { + // map a change from referenced configMap to ExampleCRDWithConfigMapRef, which causes its re-reconcile + crList := &ExampleCRDWithConfigMapRefList{} + if err := manager.GetClient().List(ctx, crList); err != nil { + manager.GetLogger().Error(err, "while listing ExampleCRDWithConfigMapRefs") + return nil + } + + reqs := make([]ctrl.Request, 0, len(crList.Items)) + for _, item := range crList.Items { + if item.ConfigMapRef.Name == cm.GetName() { + reqs = append(reqs, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: item.GetNamespace(), + Name: item.GetName(), + }, + }) + } + } + + return reqs + })). + Complete(reconcile.Func(func(ctx context.Context, r reconcile.Request) (reconcile.Result, error) { + // Your business logic to implement the API by creating, updating, deleting objects goes here. 
+ return reconcile.Result{}, nil + })) + if err != nil { + log.Error(err, "could not create controller") + os.Exit(1) + } + + if err := manager.Start(ctrl.SetupSignalHandler()); err != nil { log.Error(err, "could not start manager") os.Exit(1) } @@ -63,32 +160,33 @@ func Example() { // This example creates a simple application Controller that is configured for ReplicaSets and Pods. // This application controller will be running leader election with the provided configuration in the manager options. // If leader election configuration is not provided, controller runs leader election with default values. -// Default values taken from: https://github.com/kubernetes/apiserver/blob/master/pkg/apis/config/v1alpha1/defaults.go -// defaultLeaseDuration = 15 * time.Second -// defaultRenewDeadline = 10 * time.Second -// defaultRetryPeriod = 2 * time.Second +// Default values taken from: https://github.com/kubernetes/component-base/blob/master/config/v1alpha1/defaults.go +// * defaultLeaseDuration = 15 * time.Second +// * defaultRenewDeadline = 10 * time.Second +// * defaultRetryPeriod = 2 * time.Second // // * Create a new application for ReplicaSets that manages Pods owned by the ReplicaSet and calls into // ReplicaSetReconciler. // // * Start the application. 
-// TODO(pwittrock): Update this example when we have better dependency injection support func Example_updateLeaderElectionDurations() { - var log = controllers.Log.WithName("builder-examples") + log := ctrl.Log.WithName("builder-examples") leaseDuration := 100 * time.Second renewDeadline := 80 * time.Second retryPeriod := 20 * time.Second - manager, err := controllers.NewManager(controllers.GetConfigOrDie(), controllers.Options{ - LeaseDuration: &leaseDuration, - RenewDeadline: &renewDeadline, - RetryPeriod: &retryPeriod, - }) + manager, err := ctrl.NewManager( + ctrl.GetConfigOrDie(), + ctrl.Options{ + LeaseDuration: &leaseDuration, + RenewDeadline: &renewDeadline, + RetryPeriod: &retryPeriod, + }) if err != nil { log.Error(err, "could not create manager") os.Exit(1) } - err = controllers. + err = ctrl. NewControllerManagedBy(manager). // Create the Controller For(&appsv1.ReplicaSet{}). // ReplicaSet is the Application API Owns(&corev1.Pod{}). // ReplicaSet owns Pods created by it @@ -98,7 +196,7 @@ func Example_updateLeaderElectionDurations() { os.Exit(1) } - if err := manager.Start(controllers.SetupSignalHandler()); err != nil { + if err := manager.Start(ctrl.SetupSignalHandler()); err != nil { log.Error(err, "could not start manager") os.Exit(1) } @@ -115,28 +213,28 @@ type ReplicaSetReconciler struct { // // * Read the ReplicaSet // * Read the Pods -// * Set a Label on the ReplicaSet with the Pod count -func (a *ReplicaSetReconciler) Reconcile(req controllers.Request) (controllers.Result, error) { +// * Set a Label on the ReplicaSet with the Pod count. 
+func (a *ReplicaSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { // Read the ReplicaSet rs := &appsv1.ReplicaSet{} - err := a.Get(context.TODO(), req.NamespacedName, rs) + err := a.Get(ctx, req.NamespacedName, rs) if err != nil { - return controllers.Result{}, err + return ctrl.Result{}, err } // List the Pods matching the PodTemplate Labels pods := &corev1.PodList{} - err = a.List(context.TODO(), pods, client.InNamespace(req.Namespace), client.MatchingLabels(rs.Spec.Template.Labels)) + err = a.List(ctx, pods, client.InNamespace(req.Namespace), client.MatchingLabels(rs.Spec.Template.Labels)) if err != nil { - return controllers.Result{}, err + return ctrl.Result{}, err } // Update the ReplicaSet rs.Labels["pod-count"] = fmt.Sprintf("%v", len(pods.Items)) - err = a.Update(context.TODO(), rs) + err = a.Update(ctx, rs) if err != nil { - return controllers.Result{}, err + return ctrl.Result{}, err } - return controllers.Result{}, nil + return ctrl.Result{}, nil } diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000000..2110ae214e --- /dev/null +++ b/examples/README.md @@ -0,0 +1,36 @@ +# Examples + +These two examples represent the usage of `controller-runtime` libraries for built-in Kubernetes resources as well as custom resources. + +### builtins/ + +This example implements a custom controller and webhooks for the *existing* ReplicaSet resource. + +* `controller.go`: implements a reconciler for ReplicaSets +* `mutatingwebhook.go`: implements a mutating webhook that adds an annotation to every incoming Pod ("example-mutating-admission-webhook" = "foo") +* `validatingwebhook.go`: implements a validating webhook that checks to see if a Pod has the aforementioned annotation +* `main.go` + 1. Creates a new manager + 2. Creates a new controller that watches both ReplicaSets and Pods and reconciles the objects with the implemented reconciler + 3. 
Registers the mutating and validating webhooks with the manager + 4. Starts the manager + +### crd/ + +This example implements a *new* Kubernetes resource, ChaosPod, and creates a custom controller that watches it and webhooks that mutate and validate. + +* `pkg/` + * `resource.go`: defines the schema for the ChaosPod API and implements validate and mutate webhooks + * `groupversion_info.go`: specifies the Group and Version for the ChaosPod API + * `zz_generated.deepcopy.go`: deep copy functions generated by kubebuilder +* `main.go` + 1. Creates a new manager + 2. Adds ChaosPod resource to the manager's schema + 3. Implements a reconciler to execute the desired behavior of the ChaosPod API + 4. Creates a new controller that watches ChaosPods and reconciles the objects with the implemented reconciler + 5. Adds ChaosPod webhooks to manager + 6. Starts the manager + +## Deploying and Running + +To install and run the provided examples, see the Kubebuilder [Quick Start](https://book.kubebuilder.io/quick-start.html). \ No newline at end of file diff --git a/examples/builtins/controller.go b/examples/builtins/controller.go index 9d94b97c76..443283140a 100644 --- a/examples/builtins/controller.go +++ b/examples/builtins/controller.go @@ -18,12 +18,12 @@ package main import ( "context" - - "github.com/go-logr/logr" + "fmt" appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -31,27 +31,25 @@ import ( type reconcileReplicaSet struct { // client can be used to retrieve objects from the APIServer. 
client client.Client - log logr.Logger } // Implement reconcile.Reconciler so the controller can reconcile objects var _ reconcile.Reconciler = &reconcileReplicaSet{} -func (r *reconcileReplicaSet) Reconcile(request reconcile.Request) (reconcile.Result, error) { - // set up a convinient log object so we don't have to type request over and over again - log := r.log.WithValues("request", request) +func (r *reconcileReplicaSet) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + // set up a convenient log object so we don't have to type request over and over again + log := log.FromContext(ctx) // Fetch the ReplicaSet from the cache rs := &appsv1.ReplicaSet{} - err := r.client.Get(context.TODO(), request.NamespacedName, rs) - if errors.IsNotFound(err) { + err := r.client.Get(ctx, request.NamespacedName, rs) + if apierrors.IsNotFound(err) { log.Error(nil, "Could not find ReplicaSet") return reconcile.Result{}, nil } if err != nil { - log.Error(err, "Could not fetch ReplicaSet") - return reconcile.Result{}, err + return reconcile.Result{}, fmt.Errorf("could not fetch ReplicaSet: %+w", err) } // Print the ReplicaSet @@ -67,10 +65,9 @@ func (r *reconcileReplicaSet) Reconcile(request reconcile.Request) (reconcile.Re // Update the ReplicaSet rs.Labels["hello"] = "world" - err = r.client.Update(context.TODO(), rs) + err = r.client.Update(ctx, rs) if err != nil { - log.Error(err, "Could not write ReplicaSet") - return reconcile.Result{}, err + return reconcile.Result{}, fmt.Errorf("could not write ReplicaSet: %+w", err) } return reconcile.Result{}, nil diff --git a/examples/builtins/main.go b/examples/builtins/main.go index 70ce1a43e1..3a47814d8c 100644 --- a/examples/builtins/main.go +++ b/examples/builtins/main.go @@ -17,33 +17,24 @@ limitations under the License. 
package main import ( - "flag" "os" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/config" - "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" - logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/source" - "sigs.k8s.io/controller-runtime/pkg/webhook" ) -var log = logf.Log.WithName("example-controller") - func main() { - var disableWebhookConfigInstaller bool - flag.BoolVar(&disableWebhookConfigInstaller, "disable-webhook-config-installer", false, - "disable the installer in the webhook server, so it won't install webhook configuration resources during bootstrapping") - - flag.Parse() - logf.SetLogger(zap.Logger(false)) - entryLog := log.WithName("entrypoint") + ctrl.SetLogger(zap.New()) + entryLog := ctrl.Log.WithName("entrypoint") // Setup a Manager entryLog.Info("setting up manager") @@ -55,42 +46,29 @@ func main() { // Setup a new controller to reconcile ReplicaSets entryLog.Info("Setting up controller") - c, err := controller.New("foo-controller", mgr, controller.Options{ - Reconciler: &reconcileReplicaSet{client: mgr.GetClient(), log: log.WithName("reconciler")}, - }) - if err != nil { - entryLog.Error(err, "unable to set up individual controller") - os.Exit(1) - } - // Watch ReplicaSets and enqueue ReplicaSet object key - if err := c.Watch(&source.Kind{Type: &appsv1.ReplicaSet{}}, &handler.EnqueueRequestForObject{}); err != nil { - entryLog.Error(err, "unable to watch ReplicaSets") - os.Exit(1) - } - - // Watch Pods and enqueue owning ReplicaSet key - if err := c.Watch(&source.Kind{Type: &corev1.Pod{}}, - &handler.EnqueueRequestForOwner{OwnerType: &appsv1.ReplicaSet{}, IsController: true}); err != nil { - 
entryLog.Error(err, "unable to watch Pods") + err = ctrl. + NewControllerManagedBy(mgr). + Named("foo-controller"). + WatchesRawSource(source.Kind(mgr.GetCache(), &appsv1.ReplicaSet{}, + &handler.TypedEnqueueRequestForObject[*appsv1.ReplicaSet]{})). + WatchesRawSource(source.Kind(mgr.GetCache(), &corev1.Pod{}, + handler.TypedEnqueueRequestForOwner[*corev1.Pod](mgr.GetScheme(), mgr.GetRESTMapper(), &appsv1.ReplicaSet{}, handler.OnlyControllerOwner()))). + Complete(&reconcileReplicaSet{client: mgr.GetClient()}) + if err != nil { + entryLog.Error(err, "could not create controller") os.Exit(1) } - // Setup webhooks - entryLog.Info("setting up webhook server") - hookServer := &webhook.Server{ - Port: 9876, - CertDir: "/tmp/cert", - } - if err := mgr.Add(hookServer); err != nil { - entryLog.Error(err, "unable register webhook server with manager") + if err := ctrl.NewWebhookManagedBy(mgr). + For(&corev1.Pod{}). + WithDefaulter(&podAnnotator{}). + WithValidator(&podValidator{}). + Complete(); err != nil { + entryLog.Error(err, "unable to create webhook", "webhook", "Pod") os.Exit(1) } - entryLog.Info("registering webhooks to the webhook server") - hookServer.Register("/mutate-pods", &webhook.Admission{Handler: &podAnnotator{}}) - hookServer.Register("/validate-pods", &webhook.Admission{Handler: &podValidator{}}) - entryLog.Info("starting manager") if err := mgr.Start(signals.SetupSignalHandler()); err != nil { entryLog.Error(err, "unable to run manager") diff --git a/examples/builtins/mutatingwebhook.go b/examples/builtins/mutatingwebhook.go index 56b5cd2a2a..a588eba8f9 100644 --- a/examples/builtins/mutatingwebhook.go +++ b/examples/builtins/mutatingwebhook.go @@ -18,56 +18,31 @@ package main import ( "context" - "encoding/json" - "net/http" + "fmt" corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "k8s.io/apimachinery/pkg/runtime" + + logf "sigs.k8s.io/controller-runtime/pkg/log" ) -// 
podAnnotator annotates Pods -type podAnnotator struct { - client client.Client - decoder *admission.Decoder -} +// +kubebuilder:webhook:path=/mutate--v1-pod,mutating=true,failurePolicy=fail,groups="",resources=pods,verbs=create;update,versions=v1,name=mpod.kb.io -// podAnnotator adds an annotation to every incoming pods. -func (a *podAnnotator) Handle(ctx context.Context, req admission.Request) admission.Response { - pod := &corev1.Pod{} +// podAnnotator annotates Pods +type podAnnotator struct{} - err := a.decoder.Decode(req, pod) - if err != nil { - return admission.Errored(http.StatusBadRequest, err) +func (a *podAnnotator) Default(ctx context.Context, obj runtime.Object) error { + log := logf.FromContext(ctx) + pod, ok := obj.(*corev1.Pod) + if !ok { + return fmt.Errorf("expected a Pod but got a %T", obj) } if pod.Annotations == nil { pod.Annotations = map[string]string{} } pod.Annotations["example-mutating-admission-webhook"] = "foo" + log.Info("Annotated Pod") - marshaledPod, err := json.Marshal(pod) - if err != nil { - return admission.Errored(http.StatusInternalServerError, err) - } - - return admission.PatchResponseFromRaw(req.Object.Raw, marshaledPod) -} - -// podAnnotator implements inject.Client. -// A client will be automatically injected. - -// InjectClient injects the client. -func (a *podAnnotator) InjectClient(c client.Client) error { - a.client = c - return nil -} - -// podAnnotator implements admission.DecoderInjector. -// A decoder will be automatically injected. - -// InjectDecoder injects the decoder. 
-func (a *podAnnotator) InjectDecoder(d *admission.Decoder) error { - a.decoder = d return nil } diff --git a/examples/builtins/validatingwebhook.go b/examples/builtins/validatingwebhook.go index e185180e2b..1bee7f7c84 100644 --- a/examples/builtins/validatingwebhook.go +++ b/examples/builtins/validatingwebhook.go @@ -19,54 +19,48 @@ package main import ( "context" "fmt" - "net/http" corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" + "k8s.io/apimachinery/pkg/runtime" + + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) -// podValidator validates Pods -type podValidator struct { - client client.Client - decoder *admission.Decoder -} +// +kubebuilder:webhook:path=/validate--v1-pod,mutating=false,failurePolicy=fail,groups="",resources=pods,verbs=create;update,versions=v1,name=vpod.kb.io -// podValidator admits a pod iff a specific annotation exists. -func (v *podValidator) Handle(ctx context.Context, req admission.Request) admission.Response { - pod := &corev1.Pod{} +// podValidator validates Pods +type podValidator struct{} - err := v.decoder.Decode(req, pod) - if err != nil { - return admission.Errored(http.StatusBadRequest, err) +// validate admits a pod if a specific annotation exists. 
+func (v *podValidator) validate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + log := logf.FromContext(ctx) + pod, ok := obj.(*corev1.Pod) + if !ok { + return nil, fmt.Errorf("expected a Pod but got a %T", obj) } + log.Info("Validating Pod") key := "example-mutating-admission-webhook" anno, found := pod.Annotations[key] if !found { - return admission.Denied(fmt.Sprintf("missing annotation %s", key)) + return nil, fmt.Errorf("missing annotation %s", key) } if anno != "foo" { - return admission.Denied(fmt.Sprintf("annotation %s did not have value %q", key, "foo")) + return nil, fmt.Errorf("annotation %s did not have value %q", key, "foo") } - return admission.Allowed("") + return nil, nil } -// podValidator implements inject.Client. -// A client will be automatically injected. - -// InjectClient injects the client. -func (v *podValidator) InjectClient(c client.Client) error { - v.client = c - return nil +func (v *podValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + return v.validate(ctx, obj) } -// podValidator implements admission.DecoderInjector. -// A decoder will be automatically injected. +func (v *podValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + return v.validate(ctx, newObj) +} -// InjectDecoder injects the decoder. -func (v *podValidator) InjectDecoder(d *admission.Decoder) error { - v.decoder = d - return nil +func (v *podValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + return v.validate(ctx, obj) } diff --git a/examples/conversion/pkg/apis/jobs/v1/doc.go b/examples/conversion/pkg/apis/jobs/v1/doc.go deleted file mode 100644 index 6b06db1687..0000000000 --- a/examples/conversion/pkg/apis/jobs/v1/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1 contains API Schema definitions for the jobs v1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/droot/crd-conversion-example/pkg/apis/jobs -// +k8s:defaulter-gen=TypeMeta -// +groupName=jobs.example.org -package v1 diff --git a/examples/conversion/pkg/apis/jobs/v2/doc.go b/examples/conversion/pkg/apis/jobs/v2/doc.go deleted file mode 100644 index 2a1052c95e..0000000000 --- a/examples/conversion/pkg/apis/jobs/v2/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package v2 contains API Schema definitions for the jobs v2 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/droot/crd-conversion-example/pkg/apis/jobs -// +k8s:defaulter-gen=TypeMeta -// +groupName=jobs.example.org -package v2 diff --git a/examples/crd/main.go b/examples/crd/main.go index 82dcd75c5d..0bf65c9890 100644 --- a/examples/crd/main.go +++ b/examples/crd/main.go @@ -29,12 +29,12 @@ import ( ctrl "sigs.k8s.io/controller-runtime" api "sigs.k8s.io/controller-runtime/examples/crd/pkg" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) var ( setupLog = ctrl.Log.WithName("setup") - recLog = ctrl.Log.WithName("reconciler") ) type reconciler struct { @@ -42,10 +42,9 @@ type reconciler struct { scheme *runtime.Scheme } -func (r *reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { - log := recLog.WithValues("chaospod", req.NamespacedName) +func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := log.FromContext(ctx).WithValues("chaospod", req.NamespacedName) log.V(1).Info("reconciling chaos pod") - ctx := context.Background() var chaospod api.ChaosPod if err := r.Get(ctx, req.NamespacedName, &chaospod); err != nil { @@ -66,7 +65,7 @@ func (r *reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { if podFound { shouldStop := chaospod.Spec.NextStop.Time.Before(time.Now()) if !shouldStop { - return ctrl.Result{RequeueAfter: chaospod.Spec.NextStop.Sub(time.Now()) + 1*time.Second}, nil + return ctrl.Result{RequeueAfter: time.Until(chaospod.Spec.NextStop.Time) + 1*time.Second}, nil } if err := r.Delete(ctx, &pod); err != nil { @@ -103,7 +102,7 @@ func (r *reconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { } func main() { - ctrl.SetLogger(zap.Logger(true)) + ctrl.SetLogger(zap.New()) mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), 
ctrl.Options{}) if err != nil { @@ -125,12 +124,19 @@ func main() { Client: mgr.GetClient(), scheme: mgr.GetScheme(), }) - if err != nil { setupLog.Error(err, "unable to create controller") os.Exit(1) } + err = ctrl.NewWebhookManagedBy(mgr). + For(&api.ChaosPod{}). + Complete() + if err != nil { + setupLog.Error(err, "unable to create webhook") + os.Exit(1) + } + setupLog.Info("starting manager") if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { setupLog.Error(err, "problem running manager") diff --git a/examples/crd/pkg/groupversion_info.go b/examples/crd/pkg/groupversion_info.go index eccef4121e..693d255b05 100644 --- a/examples/crd/pkg/groupversion_info.go +++ b/examples/crd/pkg/groupversion_info.go @@ -14,19 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package pkg contains API Schema definitions for the chaosapps v1 API group // +kubebuilder:object:generate=true // +groupName=chaosapps.metamagical.io package pkg import ( "k8s.io/apimachinery/pkg/runtime/schema" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/scheme" ) var ( - log = logf.Log.WithName("chaospod-resource") - // SchemeGroupVersion is group version used to register these objects SchemeGroupVersion = schema.GroupVersion{Group: "chaosapps.metamagical.io", Version: "v1"} diff --git a/examples/crd/pkg/resource.go b/examples/crd/pkg/resource.go index 844490dfea..80800a23cb 100644 --- a/examples/crd/pkg/resource.go +++ b/examples/crd/pkg/resource.go @@ -17,13 +17,8 @@ limitations under the License. 
package pkg import ( - "fmt" - "time" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/webhook" ) // ChaosPodSpec defines the desired state of ChaosPod @@ -61,45 +56,6 @@ type ChaosPodList struct { Items []ChaosPod `json:"items"` } -// ValidateCreate implements webhookutil.validator so a webhook will be registered for the type -func (c *ChaosPod) ValidateCreate() error { - log.Info("validate create", "name", c.Name) - - if c.Spec.NextStop.Before(&metav1.Time{Time: time.Now()}) { - return fmt.Errorf(".spec.nextStop must be later than current time") - } - return nil -} - -// ValidateUpdate implements webhookutil.validator so a webhook will be registered for the type -func (c *ChaosPod) ValidateUpdate(old runtime.Object) error { - log.Info("validate update", "name", c.Name) - - if c.Spec.NextStop.Before(&metav1.Time{Time: time.Now()}) { - return fmt.Errorf(".spec.nextStop must be later than current time") - } - - oldC, ok := old.(*ChaosPod) - if !ok { - return fmt.Errorf("expect old object to be a %T instead of %T", oldC, old) - } - if c.Spec.NextStop.After(oldC.Spec.NextStop.Add(time.Hour)) { - return fmt.Errorf("it is not allowed to delay.spec.nextStop for more than 1 hour") - } - return nil -} - -var _ webhook.Defaulter = &ChaosPod{} - -// Default implements webhookutil.defaulter so a webhook will be registered for the type -func (c *ChaosPod) Default() { - log.Info("default", "name", c.Name) - - if c.Spec.NextStop.Before(&metav1.Time{Time: time.Now()}) { - c.Spec.NextStop = metav1.Time{Time: time.Now().Add(time.Minute)} - } -} - func init() { SchemeBuilder.Register(&ChaosPod{}, &ChaosPodList{}) } diff --git a/examples/multiclustersync/main.go b/examples/multiclustersync/main.go new file mode 100644 index 0000000000..e06b754222 --- /dev/null +++ b/examples/multiclustersync/main.go @@ -0,0 +1,178 @@ +package main + +import ( + "context" + "fmt" + "os" + + corev1 
"k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +func main() { + if err := run(); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } +} + +const ( + sourceNamespace = "namespace-to-sync-all-secrets-from" + targetNamespace = "namespace-to-sync-all-secrets-to" +) + +func run() error { + log.SetLogger(zap.New()) + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{}) + if err != nil { + return fmt.Errorf("failed to construct manager: %w", err) + } + + allTargets := map[string]cluster.Cluster{} + + cluster, err := cluster.New(ctrl.GetConfigOrDie()) + if err != nil { + return fmt.Errorf("failed to construct clusters: %w", err) + } + if err := mgr.Add(cluster); err != nil { + return fmt.Errorf("failed to add cluster to manager: %w", err) + } + + // Add more target clusters here as needed + allTargets["self"] = cluster + + b := builder.TypedControllerManagedBy[request](mgr). + Named("secret-sync"). 
+ // Watch secrets in the source namespace of the source cluster and + // create requests for each target cluster + WatchesRawSource(source.TypedKind( + mgr.GetCache(), + &corev1.Secret{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, s *corev1.Secret) []request { + if s.Namespace != sourceNamespace { + return nil + } + + result := make([]request, 0, len(allTargets)) + for targetCluster := range allTargets { + result = append(result, request{ + NamespacedName: types.NamespacedName{Namespace: s.Namespace, Name: s.Name}, + clusterName: targetCluster, + }) + } + + return result + }), + )). + WithOptions(controller.TypedOptions[request]{MaxConcurrentReconciles: 10}) + + for targetClusterName, targetCluster := range allTargets { + // Watch secrets in the target namespace of each target cluster + // and create a request for itself. + b = b.WatchesRawSource(source.TypedKind( + targetCluster.GetCache(), + &corev1.Secret{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, s *corev1.Secret) []request { + if s.Namespace != targetNamespace { + return nil + } + + return []request{{ + NamespacedName: types.NamespacedName{Namespace: sourceNamespace, Name: s.Name}, + clusterName: targetClusterName, + }} + }), + )) + } + + clients := make(map[string]client.Client, len(allTargets)) + for targetClusterName, targetCluster := range allTargets { + clients[targetClusterName] = targetCluster.GetClient() + } + + if err := b.Complete(&secretSyncReconciler{ + source: mgr.GetClient(), + targets: clients, + }); err != nil { + return fmt.Errorf("failed to build reconciler: %w", err) + } + + ctx := signals.SetupSignalHandler() + if err := mgr.Start(ctx); err != nil { + return fmt.Errorf("failed to start manager: %w", err) + } + + return nil +} + +type request struct { + types.NamespacedName + clusterName string +} + +// secretSyncReconciler is a simple reconciler that keeps all secrets in the source namespace of a given +// source cluster in sync with the 
secrets in the target namespace of all target clusters. +type secretSyncReconciler struct { + source client.Client + targets map[string]client.Client +} + +func (s *secretSyncReconciler) Reconcile(ctx context.Context, req request) (reconcile.Result, error) { + targetClient, found := s.targets[req.clusterName] + if !found { + return reconcile.Result{}, reconcile.TerminalError(fmt.Errorf("target cluster %s not found", req.clusterName)) + } + + var reference corev1.Secret + if err := s.source.Get(ctx, req.NamespacedName, &reference); err != nil { + if !apierrors.IsNotFound(err) { + return reconcile.Result{}, fmt.Errorf("failed to get secret %s from reference cluster: %w", req.String(), err) + } + if err := targetClient.Delete(ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Name: req.Name, + Namespace: targetNamespace, + }}); err != nil { + if !apierrors.IsNotFound(err) { + return reconcile.Result{}, fmt.Errorf("failed to delete secret %s/%s in cluster %s: %w", targetNamespace, req.Name, req.clusterName, err) + } + + return reconcile.Result{}, nil + } + + log.FromContext(ctx).Info("Deleted secret", "cluster", req.clusterName, "namespace", targetNamespace, "name", req.Name) + return reconcile.Result{}, nil + } + + target := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ + Name: reference.Name, + Namespace: targetNamespace, + }} + result, err := controllerutil.CreateOrUpdate(ctx, targetClient, target, func() error { + target.Data = reference.Data + return nil + }) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to upsert target secret %s/%s: %w", target.Namespace, target.Name, err) + } + + if result != controllerutil.OperationResultNone { + log.FromContext(ctx).Info("Upserted secret", "cluster", req.clusterName, "namespace", targetNamespace, "name", req.Name, "result", result) + } + + return reconcile.Result{}, nil +} diff --git a/examples/priorityqueue/main.go b/examples/priorityqueue/main.go new file mode 100644 index 0000000000..1dc10c2cbe --- 
/dev/null +++ b/examples/priorityqueue/main.go @@ -0,0 +1,76 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "fmt" + "os" + "time" + + "go.uber.org/zap/zapcore" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/builder" + kubeconfig "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/config" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func init() { +} + +func main() { + if err := run(); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } +} + +func run() error { + log.SetLogger(zap.New(func(o *zap.Options) { + o.Level = zapcore.Level(-5) + })) + + // Setup a Manager + mgr, err := manager.New(kubeconfig.GetConfigOrDie(), manager.Options{ + Controller: config.Controller{}, + }) + if err != nil { + return fmt.Errorf("failed to set up controller-manager: %w", err) + } + + if err := builder.ControllerManagedBy(mgr). + For(&corev1.ConfigMap{}). 
+ Complete(reconcile.Func(func(ctx context.Context, r reconcile.Request) (reconcile.Result, error) { + log.FromContext(ctx).Info("Reconciling") + time.Sleep(10 * time.Second) + + return reconcile.Result{}, nil + })); err != nil { + return fmt.Errorf("failed to set up controller: %w", err) + } + + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + return fmt.Errorf("failed to start manager: %w", err) + } + + return nil +} diff --git a/examples/scratch-env/go.mod b/examples/scratch-env/go.mod new file mode 100644 index 0000000000..546c7c39ee --- /dev/null +++ b/examples/scratch-env/go.mod @@ -0,0 +1,70 @@ +module sigs.k8s.io/controller-runtime/examples/scratch-env + +go 1.24.0 + +require ( + github.com/spf13/pflag v1.0.6 + go.uber.org/zap v1.27.0 + sigs.k8s.io/controller-runtime v0.0.0-00010101000000-000000000000 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect + 
github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.9.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.34.1 // indirect + k8s.io/apiextensions-apiserver v0.34.1 // indirect + k8s.io/apimachinery v0.34.1 // indirect + k8s.io/client-go v0.34.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + 
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) + +replace sigs.k8s.io/controller-runtime => ../.. diff --git a/examples/scratch-env/go.sum b/examples/scratch-env/go.sum new file mode 100644 index 0000000000..012b88f447 --- /dev/null +++ b/examples/scratch-env/go.sum @@ -0,0 +1,197 @@ +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fsnotify/fsnotify v1.9.0 
h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod 
h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= 
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod 
h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod 
h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.34.1 
h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= +k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= +k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= +k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= +k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= +k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= +k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/examples/scratch-env/main.go b/examples/scratch-env/main.go new file mode 
100644 index 0000000000..b8305ffed3 --- /dev/null +++ b/examples/scratch-env/main.go @@ -0,0 +1,132 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + goflag "flag" + "os" + + flag "github.com/spf13/pflag" + "go.uber.org/zap" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logzap "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + crdPaths = flag.StringSlice("crd-paths", nil, "paths to files or directories containing CRDs to install on start") + webhookPaths = flag.StringSlice("webhook-paths", nil, "paths to files or directories containing webhook configurations to install on start") + attachControlPlaneOut = flag.Bool("debug-env", false, "attach to test env (apiserver & etcd) output -- just a convinience flag to force KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT=true") +) + +// have a separate function so we can return an exit code w/o skipping defers +func runMain() int { + loggerOpts := &logzap.Options{ + Development: true, // a sane default + ZapOpts: []zap.Option{zap.AddCaller()}, + } + { + var goFlagSet goflag.FlagSet + loggerOpts.BindFlags(&goFlagSet) + flag.CommandLine.AddGoFlagSet(&goFlagSet) + } + flag.Parse() + ctrl.SetLogger(logzap.New(logzap.UseFlagOptions(loggerOpts))) + ctrl.Log.Info("Starting...") + + log := ctrl.Log.WithName("main") + + env := &envtest.Environment{} + env.CRDInstallOptions.Paths = *crdPaths + 
env.WebhookInstallOptions.Paths = *webhookPaths + + if *attachControlPlaneOut { + os.Setenv("KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT", "true") + } + + log.Info("Starting apiserver & etcd") + cfg, err := env.Start() + if err != nil { + log.Error(err, "unable to start the test environment") + // shut down the environment in case we started it and failed while + // installing CRDs or provisioning users. + if err := env.Stop(); err != nil { + log.Error(err, "unable to stop the test environment after an error (this might be expected, but just though you should know)") + } + return 1 + } + + log.Info("apiserver running", "host", cfg.Host) + + // NB(directxman12): this group is unfortunately named, but various + // kubernetes versions require us to use it to get "admin" access. + user, err := env.ControlPlane.AddUser(envtest.User{ + Name: "envtest-admin", + Groups: []string{"system:masters"}, + }, nil) + if err != nil { + log.Error(err, "unable to provision admin user, continuing on without it") + return 1 + } + + // TODO(directxman12): add support for writing to a new context in an existing file + kubeconfigFile, err := os.CreateTemp("", "scratch-env-kubeconfig-") + if err != nil { + log.Error(err, "unable to create kubeconfig file, continuing on without it") + return 1 + } + defer os.Remove(kubeconfigFile.Name()) + + { + log := log.WithValues("path", kubeconfigFile.Name()) + log.V(1).Info("Writing kubeconfig") + + kubeConfig, err := user.KubeConfig() + if err != nil { + log.Error(err, "unable to create kubeconfig") + } + + if _, err := kubeconfigFile.Write(kubeConfig); err != nil { + log.Error(err, "unable to save kubeconfig") + return 1 + } + + log.Info("Wrote kubeconfig") + } + + if opts := env.WebhookInstallOptions; opts.LocalServingPort != 0 { + log.Info("webhooks configured for", "host", opts.LocalServingHost, "port", opts.LocalServingPort, "dir", opts.LocalServingCertDir) + } + + ctx := ctrl.SetupSignalHandler() + <-ctx.Done() + + log.Info("Shutting down apiserver 
& etcd") + err = env.Stop() + if err != nil { + log.Error(err, "unable to stop the test environment") + return 1 + } + + log.Info("Shutdown successful") + return 0 +} + +func main() { + os.Exit(runMain()) +} diff --git a/examples/tokenreview/main.go b/examples/tokenreview/main.go new file mode 100644 index 0000000000..d018956f96 --- /dev/null +++ b/examples/tokenreview/main.go @@ -0,0 +1,58 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "os" + + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/webhook/authentication" +) + +func init() { + log.SetLogger(zap.New()) +} + +func main() { + entryLog := log.Log.WithName("entrypoint") + + // Setup a Manager + entryLog.Info("setting up manager") + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) + if err != nil { + entryLog.Error(err, "unable to set up overall controller manager") + os.Exit(1) + } + + // Setup webhooks + entryLog.Info("setting up webhook server") + hookServer := mgr.GetWebhookServer() + + entryLog.Info("registering webhooks to the webhook server") + hookServer.Register("/validate-v1-tokenreview", &authentication.Webhook{Handler: &authenticator{}}) + + 
entryLog.Info("starting manager") + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + entryLog.Error(err, "unable to run manager") + os.Exit(1) + } +} diff --git a/examples/tokenreview/tokenreview.go b/examples/tokenreview/tokenreview.go new file mode 100644 index 0000000000..16e4151077 --- /dev/null +++ b/examples/tokenreview/tokenreview.go @@ -0,0 +1,37 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + + v1 "k8s.io/api/authentication/v1" + + "sigs.k8s.io/controller-runtime/pkg/webhook/authentication" +) + +// authenticator validates tokenreviews +type authenticator struct { +} + +// Handle admits a request by the token. 
+func (a *authenticator) Handle(ctx context.Context, req authentication.Request) authentication.Response { + if req.Spec.Token == "invalid" { + return authentication.Unauthenticated("invalid is an invalid token", v1.UserInfo{}) + } + return authentication.Authenticated("", v1.UserInfo{}) +} diff --git a/examples/typed/main.go b/examples/typed/main.go new file mode 100644 index 0000000000..7245ce844d --- /dev/null +++ b/examples/typed/main.go @@ -0,0 +1,66 @@ +package main + +import ( + "context" + "fmt" + "os" + + networkingv1 "k8s.io/api/networking/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +func main() { + if err := run(); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } +} + +func run() error { + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{}) + if err != nil { + return fmt.Errorf("failed to construct manager: %w", err) + } + + // Use a request type that is always equal to itself so the workqueue + // de-duplicates all events. + // This can for example be useful for an ingress-controller that + // generates a config from all ingresses, rather than individual ones. + type request struct{} + + r := reconcile.TypedFunc[request](func(ctx context.Context, _ request) (reconcile.Result, error) { + ingressList := &networkingv1.IngressList{} + if err := mgr.GetClient().List(ctx, ingressList); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to list ingresses: %w", err) + } + + buildIngressConfig(ingressList) + return reconcile.Result{}, nil + }) + if err := builder.TypedControllerManagedBy[request](mgr). 
+ WatchesRawSource(source.TypedKind( + mgr.GetCache(), + &networkingv1.Ingress{}, + handler.TypedEnqueueRequestsFromMapFunc(func(context.Context, *networkingv1.Ingress) []request { + return []request{{}} + })), + ). + Named("ingress_controller"). + Complete(r); err != nil { + return fmt.Errorf("failed to construct ingress-controller: %w", err) + } + + ctx := signals.SetupSignalHandler() + if err := mgr.Start(ctx); err != nil { + return fmt.Errorf("failed to start manager: %w", err) + } + + return nil +} + +func buildIngressConfig(*networkingv1.IngressList) {} diff --git a/go.mod b/go.mod index 568ccb60da..4d998fe2fc 100644 --- a/go.mod +++ b/go.mod @@ -1,50 +1,103 @@ module sigs.k8s.io/controller-runtime -go 1.11 +go 1.24.0 require ( - cloud.google.com/go v0.26.0 // indirect - github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 - github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect - github.com/evanphx/json-patch v4.1.0+incompatible - github.com/go-logr/logr v0.1.0 - github.com/go-logr/zapr v0.1.0 - github.com/gogo/protobuf v1.1.1 // indirect - github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 // indirect - github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect - github.com/googleapis/gnostic v0.2.0 // indirect - github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 // indirect - github.com/imdario/mergo v0.3.6 // indirect - github.com/json-iterator/go v1.1.5 // indirect - github.com/kr/pretty v0.1.0 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 + github.com/fsnotify/fsnotify v1.9.0 + github.com/go-logr/logr v1.4.2 + github.com/go-logr/zapr v1.3.0 + 
github.com/google/btree v1.1.3 + github.com/google/go-cmp v0.7.0 + github.com/google/gofuzz v1.2.0 + github.com/onsi/ginkgo/v2 v2.22.0 + github.com/onsi/gomega v1.36.1 + github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_model v0.6.1 + go.uber.org/goleak v1.3.0 + go.uber.org/zap v1.27.0 + golang.org/x/mod v0.21.0 + golang.org/x/sync v0.12.0 + golang.org/x/sys v0.31.0 + gomodules.xyz/jsonpatch/v2 v2.4.0 + gopkg.in/evanphx/json-patch.v4 v4.12.0 // Using v4 to match upstream + k8s.io/api v0.34.1 + k8s.io/apiextensions-apiserver v0.34.1 + k8s.io/apimachinery v0.34.1 + k8s.io/apiserver v0.34.1 + k8s.io/client-go v0.34.1 + k8s.io/klog/v2 v2.130.1 + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 + sigs.k8s.io/yaml v1.6.0 +) + +require ( + cel.dev/expr v0.24.0 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/cel-go v0.26.0 // indirect + 
github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect - github.com/onsi/ginkgo v1.6.0 - github.com/onsi/gomega v1.4.2 - github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c // indirect - github.com/pkg/errors v0.8.1 // indirect - github.com/prometheus/client_golang v0.9.0 - github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 - github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e // indirect - github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 // indirect - github.com/spf13/pflag v1.0.2 // indirect - go.uber.org/atomic v1.3.2 // indirect - go.uber.org/multierr v1.1.0 // indirect - go.uber.org/zap v1.9.1 - golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac // indirect - golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be // indirect - golang.org/x/sync v0.0.0-20190423024810-112230192c58 // indirect - golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 // indirect - google.golang.org/appengine v1.1.0 // indirect - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + github.com/modern-go/reflect2 
v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/common v0.62.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/spf13/cobra v1.9.1 // indirect + github.com/spf13/pflag v1.0.6 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/proto/otlp v1.5.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.9.0 // indirect + golang.org/x/tools v0.26.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/grpc v1.72.1 // indirect + google.golang.org/protobuf v1.36.5 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b - 
k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 - k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d - k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible - k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c // indirect - k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 // indirect - sigs.k8s.io/testing_frameworks v0.1.1 - sigs.k8s.io/yaml v1.1.0 + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/component-base v0.34.1 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect ) diff --git a/go.sum b/go.sum index b5e1bdd0c3..d6278d8a7d 100644 --- a/go.sum +++ b/go.sum @@ -1,125 +1,259 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 h1:Kn3rqvbUFqSepE2OqVu0Pn1CbDw9IuMlONapol0zuwk= -github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= -github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= 
-github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= -github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= -github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= 
-github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= -github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 
h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod 
h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= +github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern 
v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= 
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs= -github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY= -github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= -github.com/prometheus/procfs 
v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac h1:7d7lG9fHOLdL6jZPtnV4LpI41SbohIJ1Atq7U991dMg= -golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 
h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 
h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= -golang.org/x/sys 
v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= +google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/protobuf v1.36.5 
h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b h1:aBGgKJUM9Hk/3AE8WaZIApnTxG35kbuQba2w+SXqezo= -k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 h1:q1Qvjzs/iEdXF6A1a8H3AKVFDzJNcJn3nXMs6R6qFtA= -k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= -k8s.io/apimachinery 
v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7nKYgO3J+swQJtPYsP9wHA= -k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ= -k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/2U0YruD65DqX3INopDAQM= -k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y= -k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= -sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= +k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= +k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= +k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= +k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= 
+k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA= +k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0= +k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= +k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= +k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A= +k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= 
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/hack/apidiff.sh b/hack/apidiff.sh new file mode 100755 index 0000000000..a15342d16a --- /dev/null +++ b/hack/apidiff.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +source $(dirname ${BASH_SOURCE})/common.sh + +REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +cd "${REPO_ROOT}" + +export GOTOOLCHAIN="go$(make --silent go-version)" + +header_text "verifying api diff" +echo "*** Running go-apidiff ***" +APIDIFF_OLD_COMMIT="${PULL_BASE_SHA}" make verify-apidiff diff --git a/hack/check-everything.sh b/hack/check-everything.sh index 8be828f447..84db032176 100755 --- a/hack/check-everything.sh +++ b/hack/check-everything.sh @@ -14,84 +14,34 @@ # See the License for the specific language governing permissions and # limitations under the License. -set -e +set -o errexit +set -o nounset +set -o pipefail hack_dir=$(dirname ${BASH_SOURCE}) source ${hack_dir}/common.sh -k8s_version=1.14.1 -goarch=amd64 -goos="unknown" - -if [[ "$OSTYPE" == "linux-gnu" ]]; then - goos="linux" -elif [[ "$OSTYPE" == "darwin"* ]]; then - goos="darwin" -fi - -if [[ "$goos" == "unknown" ]]; then - echo "OS '$OSTYPE' not supported. Aborting." 
>&2 - exit 1 -fi - tmp_root=/tmp kb_root_dir=$tmp_root/kubebuilder -# Skip fetching and untaring the tools by setting the SKIP_FETCH_TOOLS variable -# in your environment to any value: -# -# $ SKIP_FETCH_TOOLS=1 ./test.sh -# -# If you skip fetching tools, this script will use the tools already on your -# machine, but rebuild the kubebuilder and kubebuilder-bin binaries. -SKIP_FETCH_TOOLS=${SKIP_FETCH_TOOLS:-""} - -# fetch k8s API gen tools and make it available under kb_root_dir/bin. -function fetch_kb_tools { - header_text "fetching tools" - kb_tools_archive_name="kubebuilder-tools-$k8s_version-$goos-$goarch.tar.gz" - kb_tools_download_url="https://storage.googleapis.com/kubebuilder-tools/$kb_tools_archive_name" +export GOTOOLCHAIN="go$(make --silent go-version)" - kb_tools_archive_path="$tmp_root/$kb_tools_archive_name" - if [ ! -f $kb_tools_archive_path ]; then - curl -sL ${kb_tools_download_url} -o "$kb_tools_archive_path" - fi - tar -zvxf "$kb_tools_archive_path" -C "$tmp_root/" -} - -function is_installed { - if [ command -v $1 &>/dev/null ]; then - return 0 - fi - return 1 -} - -function fetch_go_tools { - header_text "Checking for gometalinter.v2" - if ! is_installed golangci-lint; then - header_text "Installing golangci-lint" - GO111MODULE=on go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.15.0 - fi - - header_text "Checking for dep" - if ! is_installed dep; then - header_text "Installing dep" - # can't install dep with modules from source due to an issue with semver, - # so install from the compiled binary instead - curl -sL -o $(go env GOPATH)/bin/dep https://github.com/golang/dep/releases/download/0.5.2/dep-$(go env GOOS)-$(go env GOARCH) - fi -} - -header_text "using tools" +# Run verification scripts. +${hack_dir}/verify.sh -if [ -z "$SKIP_FETCH_TOOLS" ]; then - fetch_go_tools - fetch_kb_tools -fi +# Envtest. 
+ENVTEST_K8S_VERSION=${ENVTEST_K8S_VERSION:-"1.32.0"} -setup_envs +header_text "installing envtest tools@${ENVTEST_K8S_VERSION} with setup-envtest if necessary" +tmp_bin=/tmp/cr-tests-bin +( + # don't presume to install for the user + cd ${hack_dir}/../tools/setup-envtest + GOBIN=${tmp_bin} go install . +) +export KUBEBUILDER_ASSETS="$(${tmp_bin}/setup-envtest use --use-env -p path "${ENVTEST_K8S_VERSION}")" -${hack_dir}/verify.sh +# Run tests. ${hack_dir}/test-all.sh header_text "confirming examples compile (via go install)" diff --git a/hack/ci-check-everything.sh b/hack/ci-check-everything.sh new file mode 100755 index 0000000000..39b85e91d8 --- /dev/null +++ b/hack/ci-check-everything.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -e + +export TRACE=1 + +# Not included or existing by default in Prow +export PATH=$(go env GOPATH)/bin:$PATH +mkdir -p $(go env GOPATH)/bin + +$(dirname ${BASH_SOURCE})/check-everything.sh diff --git a/hack/common.sh b/hack/common.sh index 435749c96e..eff91ba7b7 100755 --- a/hack/common.sh +++ b/hack/common.sh @@ -51,12 +51,3 @@ fi function header_text { echo "$header$*$reset" } - -function setup_envs { - header_text "setting up env vars" - - # Setup env vars - if [[ -z "${KUBEBUILDER_ASSETS}" ]]; then - export KUBEBUILDER_ASSETS=$kb_root_dir/bin - fi -} diff --git a/hack/go-install.sh b/hack/go-install.sh new file mode 100755 index 0000000000..a07b8e0f11 --- /dev/null +++ b/hack/go-install.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +# Copyright 2021 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +if [ -z "${1}" ]; then + echo "must provide module as first parameter" + exit 1 +fi + +if [ -z "${2}" ]; then + echo "must provide binary name as second parameter" + exit 1 +fi + +if [ -z "${3}" ]; then + echo "must provide version as third parameter" + exit 1 +fi + +if [ -z "${GOBIN}" ]; then + echo "GOBIN is not set. Must set GOBIN to install the bin in a specified directory." 
+ exit 1 +fi + +rm -f "${GOBIN}/${2}"* || true + +# install the golang module specified as the first argument +go install "${1}@${3}" +mv "${GOBIN}/${2}" "${GOBIN}/${2}-${3}" +ln -sf "${GOBIN}/${2}-${3}" "${GOBIN}/${2}" diff --git a/hack/release/common.sh b/hack/release/common.sh deleted file mode 100644 index 4e9fbbf934..0000000000 --- a/hack/release/common.sh +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env bash -shopt -s extglob - -cr_major_pattern=":warning:|$(printf "\xe2\x9a\xa0")" -cr_minor_pattern=":sparkles:|$(printf "\xe2\x9c\xa8")" -cr_patch_pattern=":bug:|$(printf "\xf0\x9f\x90\x9b")" -cr_docs_pattern=":book:|$(printf "\xf0\x9f\x93\x96")" -cr_other_pattern=":running:|$(printf "\xf0\x9f\x8f\x83")" - -# cr::symbol-type-raw turns :xyz: and the corresponding emoji -# into one of "major", "minor", "patch", "docs", "other", or -# "unknown", ignoring the '!' -cr::symbol-type-raw() { - case $1 in - @(${cr_major_pattern})?('!')) - echo "major" - ;; - @(${cr_minor_pattern})?('!')) - echo "minor" - ;; - @(${cr_patch_pattern})?('!')) - echo "patch" - ;; - @(${cr_docs_pattern})?('!')) - echo "docs" - ;; - @(${cr_other_pattern})?('!')) - echo "other" - ;; - *) - echo "unknown" - ;; - esac -} - -# cr::symbol-type turns :xyz: and the corresponding emoji -# into one of "major", "minor", "patch", "docs", "other", or -# "unknown". -cr::symbol-type() { - local type_raw=$(cr::symbol-type-raw $1) - if [[ ${type_raw} == "unknown" ]]; then - echo "unknown" - return - fi - - if [[ $1 == *'!' ]]; then - echo "major" - return - fi - - echo ${type_raw} -} - -# git::is-release-branch-name checks if its argument is a release branch name -# (release-0.Y or release-X). -git::is-release-branch-name() { - [[ ${1-must specify release branch name to check} =~ release-((0\.[[:digit:]])|[[:digit:]]+) ]] -} - -# git::ensure-release-branch checks that we're on a release branch -git::ensure-release-branch() { - local current_branch=$(git rev-parse --abbrev-ref HEAD) - if ! 
git::is-release-branch-name ${current_branch}; then - echo "branch ${current_branch} does not appear to be a release branch (release-X)" >&2 - exit 1 - fi -} - -# git::export-current-version outputs the current version -# as exported variables (${maj,min,patch}_ver, last_tag) after -# checking that we're on the right release branch. -git::export-current-version() { - # make sure we're on a release branch - git::ensure-release-branch - - # deal with the release-0.1 branch, or similar - local release_ver=${BASH_REMATCH[1]} - maj_ver=${release_ver} - local tag_pattern='v${maj_ver}.([[:digit:]]+).([[:digit]]+)' - if [[ ${maj_ver} =~ 0\.([[:digit:]]+) ]]; then - maj_ver=0 - min_ver=${BASH_REMATCH[1]} - local tag_pattern="v0.(${min_ver}).([[:digit:]]+)" - fi - - # make sure we've got a tag that matches our release branch - last_tag=$(git describe --tags --abbrev=0) # try to fetch just the "current" tag name - if [[ ! ${last_tag} =~ ${tag_pattern} ]]; then - echo "tag ${last_tag} does not appear to be a release for this release (${release_ver})-- it should be v${maj_ver}.Y.Z" >&2 - exit 1 - fi - - export min_ver=${BASH_REMATCH[1]} - export patch_ver=${BASH_REMATCH[2]} - export maj_ver=${maj_ver} - export last_tag=${last_tag} -} - -# git::next-version figures out the next version to tag -# (it also sets the current version variables to the current version) -git::next-version() { - git::export-current-version - - local feature_commits=$(git rev-list ${last_tag}..${end_range} --grep="${cr_minor_pattern}") - local breaking_commits=$(git rev-list ${last_tag}..${end_range} --grep="${cr_major_pattern}") - - if [[ -z ${breaking_commits} && ${maj_ver} > 0 ]]; then - local next_ver="v$(( maj_ver + 1 )).0.0" - elif [[ -z ${feature_commits} ]]; then - local next_ver="v${maj_ver}.$(( min_ver + 1 )).0" - else - local next_ver="v${maj_ver}.${min_ver}.$(( patch_ver + 1 ))" - fi - - echo "${next_ver}" -} diff --git a/hack/release/release-notes.sh b/hack/release/release-notes.sh deleted 
file mode 100755 index 1a98343af4..0000000000 --- a/hack/release/release-notes.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/env bash - -set -e -set -o pipefail - -# import our common stuff -source "$(dirname ${BASH_SOURCE})/common.sh" - -# TODO: work with both release branch and major release -git::ensure-release-branch -git::export-current-version -# check the next version -next_ver=$(git::next-version) - -features="" -bugfixes="" -breaking="" -unknown="" -MERGE_PR="Merge pull request #([[:digit:]]+) from ([[:alnum:]-]+)/.+" -NEWLINE=" -" -head_commit=$(git rev-parse HEAD) -while read commit_word commit; do - read title - read # skip the blank line - read prefix body - - if [[ ${prefix} == v*.*.* && ( ${commit} == ${head_commit} || $(git tag --points-at ${commit}) == v*.*.* ) ]]; then - # skip version merges - continue - fi - set +x - if [[ ! ${title} =~ ${MERGE_PR} ]]; then - echo "Unable to determine PR number for merge ${commit} with title '${title}', aborting." >&2 - exit 1 - fi - pr_number=${BASH_REMATCH[1]} - pr_type=$(cr::symbol-type ${prefix}) - pr_title=${body} - if [[ ${pr_type} == "unknown" ]]; then - pr_title="${prefix} ${pr_title}" - fi - case ${pr_type} in - major) - breaking="${breaking}- ${pr_title} (#${pr_number})${NEWLINE}" - ;; - minor) - features="${features}- ${pr_title} (#${pr_number})${NEWLINE}" - ;; - patch) - bugfixes="${bugfixes}- ${pr_title} (#${pr_number})${NEWLINE}" - ;; - docs|other) - # skip non-code-changes - ;; - unknown) - unknown="${unknown}- ${pr_title} (#${pr_number})${NEWLINE}" - ;; - *) - echo "unknown PR type '${pr_type}' on PR '${pr_title}'" >&2 - exit 1 - esac -done <<<$(git rev-list ${last_tag}..HEAD --merges --pretty=format:%B) - -# TODO: sort non merge commits with tags - -[[ -n "${breaking}" ]] && printf '\e[1;31mbreaking changes this version\e[0m' >&2 -[[ -n "${unknown}" ]] && printf '\e[1;35munknown changes in this release -- categorize manually\e[0m' >&2 - -echo "" >&2 -echo "" >&2 -echo "# ${next_ver}" - -if [[ 
-n ${breaking} ]]; then - echo "" - echo "## :warning: Breaking Changes" - echo "" - echo "${breaking}" -fi - -if [[ -n ${features} ]]; then - echo "" - echo "## :sparkles: New Features" - echo "" - echo "${features}" -fi - -if [[ -n ${bugfixes} ]]; then - echo "" - echo "## :bug: Bug Fixes" - echo "" - echo "${bugfixes}" -fi - -if [[ -n ${unknown} ]]; then - echo "" - echo "## :question: *categorize these manually*" - echo "" - echo "${unknown}" -fi - -echo "" -echo "*Thanks to all our contributors!*" diff --git a/hack/test-all.sh b/hack/test-all.sh index 2970b9d0d6..34d841cfd0 100755 --- a/hack/test-all.sh +++ b/hack/test-all.sh @@ -18,20 +18,23 @@ set -e source $(dirname ${BASH_SOURCE})/common.sh -setup_envs - header_text "running go test" -go test ${MOD_OPT} ./... -parallel 4 - -header_text "running coverage" +if [[ -n ${ARTIFACTS:-} ]]; then + GINKGO_ARGS="-ginkgo.junit-report=junit-report.xml" +fi -# Verify no coverage regressions have been introduced. Remove the exception list from here -# once the coverage has been brought back up -if [[ ! $(go test ${MOD_OPT} ./pkg/... -coverprofile cover.out -parallel 4 | grep -v "coverage: 100.0% of statements" | grep "controller-runtime/pkg " | grep -v "controller-runtime/pkg \|controller-runtime/pkg/recorder \|pkg/cache\|pkg/client \|pkg/event \|pkg/client/config \|pkg/controller/controllertest \|pkg/reconcile/reconciletest \|pkg/test ") ]]; then -echo "ok" -else -go test ${MOD_OPT} ./pkg/... -coverprofile cover.out -parallel 4 | grep -v "coverage: 100.0% of statements" | grep "controller-runtime/pkg " | grep -v "controller-runtime/pkg \|controller-runtime/pkg/recorder \|pkg/cache\|pkg/client \|pkg/event \|pkg/client/config \|pkg/controller/controllertest \|pkg/reconcile/reconciletest \|pkg/test " -echo "missing test coverage" -exit 1 +result=0 +go test -v -race ${P_FLAG} ${MOD_OPT} ./... --ginkgo.fail-fast ${GINKGO_ARGS} || result=$? + +if [[ -n ${ARTIFACTS:-} ]]; then + mkdir -p ${ARTIFACTS} + for file in `find . 
-name *junit-report.xml`; do + new_file=${file#./} + new_file=${new_file%/junit-report.xml} + new_file=${new_file//"/"/"-"} + mv "$file" "$ARTIFACTS/junit_${new_file}.xml" + done fi + +exit $result diff --git a/hack/tools/cmd/gomodcheck/main.go b/hack/tools/cmd/gomodcheck/main.go new file mode 100644 index 0000000000..5cbaf377e2 --- /dev/null +++ b/hack/tools/cmd/gomodcheck/main.go @@ -0,0 +1,204 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "strings" + + "go.uber.org/zap" + "golang.org/x/mod/modfile" + "sigs.k8s.io/yaml" +) + +const ( + modFile = "./go.mod" +) + +type config struct { + UpstreamRefs []string `json:"upstreamRefs"` + ExcludedModules []string `json:"excludedModules"` +} + +type upstream struct { + Ref string `json:"ref"` + Version string `json:"version"` +} + +// representation of an out of sync module +type oosMod struct { + Name string `json:"name"` + Version string `json:"version"` + Upstreams []upstream `json:"upstreams"` +} + +func main() { + l, _ := zap.NewProduction() + logger := l.Sugar() + + if len(os.Args) < 2 { + fmt.Printf("USAGE: %s [PATH_TO_CONFIG_FILE]\n", os.Args[0]) + os.Exit(1) + } + + // --- 1. parse config + b, err := os.ReadFile(os.Args[1]) + if err != nil { + fatal(err) + } + + cfg := new(config) + if err := yaml.Unmarshal(b, cfg); err != nil { + fatal(err) + } + + excludedMods := make(map[string]any) + for _, mod := range cfg.ExcludedModules { + excludedMods[mod] = nil + } + + // --- 2. project mods + projectModules, err := modulesFromGoModFile() + if err != nil { + fatal(err) + } + + // --- 3. upstream mods + upstreamModules, err := modulesFromUpstreamModGraph(cfg.UpstreamRefs) + if err != nil { + fatal(err) + } + + oosMods := make([]oosMod, 0) + + // --- 4. 
validate + // for each module in our project, + // if it matches an upstream module, + // then for each upstream module, + // if project module version doesn't match upstream version, + // then we add the version and the ref to the list of out of sync modules. + for mod, version := range projectModules { + if _, ok := excludedMods[mod]; ok { + logger.Infof("skipped: %s", mod) + continue + } + + if versionToRef, ok := upstreamModules[mod]; ok { + outOfSyncUpstream := make([]upstream, 0) + + for upstreamVersion, upstreamRef := range versionToRef { + if version == upstreamVersion { // pass if version in sync. + continue + } + + outOfSyncUpstream = append(outOfSyncUpstream, upstream{ + Ref: upstreamRef, + Version: upstreamVersion, + }) + } + + if len(outOfSyncUpstream) == 0 { // pass if no out of sync upstreams. + continue + } + + oosMods = append(oosMods, oosMod{ + Name: mod, + Version: version, + Upstreams: outOfSyncUpstream, + }) + } + } + + if len(oosMods) == 0 { + fmt.Println("🎉 Success!") + os.Exit(0) + } + + b, err = json.MarshalIndent(map[string]any{"outOfSyncModules": oosMods}, "", " ") + if err != nil { + fatal(err) + } + + fmt.Println(string(b)) + os.Exit(1) +} + +func modulesFromGoModFile() (map[string]string, error) { + b, err := os.ReadFile(modFile) + if err != nil { + return nil, err + } + + f, err := modfile.Parse(modFile, b, nil) + if err != nil { + return nil, err + } + + out := make(map[string]string) + for _, mod := range f.Require { + out[mod.Mod.Path] = mod.Mod.Version + } + + return out, nil +} + +func modulesFromUpstreamModGraph(upstreamRefList []string) (map[string]map[string]string, error) { + b, err := exec.Command("go", "mod", "graph").Output() + if err != nil { + return nil, err + } + + graph := string(b) + + // upstreamRefs is a set of user specified upstream modules. + // The set has 2 functions: + // 1. Check if `go mod graph` modules are one of the user specified upstream modules. + // 2. 
Mark if a user specified upstream module was found in the module graph. + // If a user specified upstream module is not found, gomodcheck will exit with an error. + upstreamRefs := make(map[string]bool) + for _, ref := range upstreamRefList { + upstreamRefs[ref] = false + } + + modToVersionToUpstreamRef := make(map[string]map[string]string) + for _, line := range strings.Split(graph, "\n") { + ref := strings.SplitN(line, "@", 2)[0] + + if _, ok := upstreamRefs[ref]; !ok { + continue + } + + upstreamRefs[ref] = true // mark the ref as found + + kv := strings.SplitN(strings.SplitN(line, " ", 2)[1], "@", 2) + name := kv[0] + version := kv[1] + + if _, ok := modToVersionToUpstreamRef[name]; !ok { + modToVersionToUpstreamRef[name] = make(map[string]string) + } + + modToVersionToUpstreamRef[name][version] = ref + } + + notFoundErr := "" + for ref, found := range upstreamRefs { + if !found { + notFoundErr = fmt.Sprintf("%s%s, ", notFoundErr, ref) + } + } + + if notFoundErr != "" { + return nil, fmt.Errorf("cannot verify modules: "+ + "the following specified upstream module(s) cannot be found in go.mod: [ %s ]", + strings.TrimSuffix(notFoundErr, ", ")) + } + + return modToVersionToUpstreamRef, nil +} + +func fatal(err error) { + fmt.Printf("❌ %s\n", err.Error()) + os.Exit(1) +} diff --git a/hack/verify-pr-title.sh b/hack/verify-pr-title.sh new file mode 100755 index 0000000000..a556b0172b --- /dev/null +++ b/hack/verify-pr-title.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +# Copyright 2024 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Define regex patterns +WIP_REGEX="^\W?WIP\W" +TAG_REGEX="^\[[[:alnum:]\._-]*\]" +PR_TITLE="$1" + +# Trim WIP and tags from title +trimmed_title=$(echo "$PR_TITLE" | sed -E "s/$WIP_REGEX//" | sed -E "s/$TAG_REGEX//" | xargs) + +# Normalize common emojis in text form to actual emojis +trimmed_title=$(echo "$trimmed_title" | sed -E "s/:warning:/⚠/g") +trimmed_title=$(echo "$trimmed_title" | sed -E "s/:sparkles:/✨/g") +trimmed_title=$(echo "$trimmed_title" | sed -E "s/:bug:/🐛/g") +trimmed_title=$(echo "$trimmed_title" | sed -E "s/:book:/📖/g") +trimmed_title=$(echo "$trimmed_title" | sed -E "s/:rocket:/🚀/g") +trimmed_title=$(echo "$trimmed_title" | sed -E "s/:seedling:/🌱/g") + +# Check PR type prefix +if [[ "$trimmed_title" =~ ^(⚠|✨|🐛|📖|🚀|🌱) ]]; then + echo "PR title is valid: $trimmed_title" +else + echo "Error: No matching PR type indicator found in title." + echo "You need to have one of these as the prefix of your PR title:" + echo "- Breaking change: ⚠ (:warning:)" + echo "- Non-breaking feature: ✨ (:sparkles:)" + echo "- Patch fix: 🐛 (:bug:)" + echo "- Docs: 📖 (:book:)" + echo "- Release: 🚀 (:rocket:)" + echo "- Infra/Tests/Other: 🌱 (:seedling:)" + exit 1 +fi + +# Check that PR title does not contain Issue or PR number +if [[ "$trimmed_title" =~ \#[0-9]+ ]]; then + echo "Error: PR title should not contain issue or PR number." + echo "Issue numbers belong in the PR body as either \"Fixes #XYZ\" (if it closes the issue or PR), or something like \"Related to #XYZ\" (if it's just related)." + exit 1 +fi + diff --git a/hack/verify.sh b/hack/verify.sh index 1d8b01a8a8..4ce46d15d1 100755 --- a/hack/verify.sh +++ b/hack/verify.sh @@ -18,37 +18,18 @@ set -e source $(dirname ${BASH_SOURCE})/common.sh -header_text "running go vet" +REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +cd "${REPO_ROOT}" -go vet ${MOD_OPT} ./... 
+header_text "running modules" +make modules -header_text "running golangci-lint" - -golangci-lint run --disable-all \ - --deadline 5m \ - --enable=misspell \ - --enable=structcheck \ - --enable=golint \ - --enable=deadcode \ - --enable=errcheck \ - --enable=varcheck \ - --enable=goconst \ - --enable=unparam \ - --enable=ineffassign \ - --enable=nakedret \ - --enable=interfacer \ - --enable=misspell \ - --enable=gocyclo \ - --enable=lll \ - --enable=dupl \ - --enable=goimports \ - ./pkg/... ./examples/... . +# Only run verify-modules in CI, otherwise updating +# go module locally (which is a valid operation) causes `make test` to fail. +if [[ -n ${CI} ]]; then + header_text "verifying modules" + make verify-modules +fi -# TODO: Enable these as we fix them to make them pass -# --enable=gosec \ -# --enable=maligned \ -# --enable=safesql \ - -header_text "running dep check" -dep check -skip-vendor # vendor is maintained by go modules -GO111MODULES=on go list -mod=readonly ./... +header_text "running golangci-lint" +make lint diff --git a/pkg/builder/build.go b/pkg/builder/build.go deleted file mode 100644 index f8692be072..0000000000 --- a/pkg/builder/build.go +++ /dev/null @@ -1,304 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package builder - -import ( - "fmt" - "strings" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - "sigs.k8s.io/controller-runtime/pkg/client/config" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" -) - -// Supporting mocking out functions for testing -var getConfig = config.GetConfig -var newController = controller.New -var newManager = manager.New -var getGvk = apiutil.GVKForObject - -// Builder builds a Controller. -type Builder struct { - apiType runtime.Object - mgr manager.Manager - predicates []predicate.Predicate - managedObjects []runtime.Object - watchRequest []watchRequest - config *rest.Config - ctrl controller.Controller -} - -// SimpleController returns a new Builder. -// -// Deprecated: Use ControllerManagedBy(Manager) instead. -func SimpleController() *Builder { - return &Builder{} -} - -// ControllerManagedBy returns a new controller builder that will be started by the provided Manager -func ControllerManagedBy(m manager.Manager) *Builder { - return SimpleController().WithManager(m) -} - -// ForType defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete / -// update events by *reconciling the object*. -// This is the equivalent of calling -// Watches(&source.Kind{Type: apiType}, &handler.EnqueueRequestForObject{}) -// If the passed in object has implemented the admission.Defaulter interface, a MutatingWebhook will be wired for this type. 
-// If the passed in object has implemented the admission.Validator interface, a ValidatingWebhook will be wired for this type. -// -// Deprecated: Use For -func (blder *Builder) ForType(apiType runtime.Object) *Builder { - return blder.For(apiType) -} - -// For defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete / -// update events by *reconciling the object*. -// This is the equivalent of calling -// Watches(&source.Kind{Type: apiType}, &handler.EnqueueRequestForObject{}) -// If the passed in object has implemented the admission.Defaulter interface, a MutatingWebhook will be wired for this type. -// If the passed in object has implemented the admission.Validator interface, a ValidatingWebhook will be wired for this type. -func (blder *Builder) For(apiType runtime.Object) *Builder { - blder.apiType = apiType - return blder -} - -// Owns defines types of Objects being *generated* by the ControllerManagedBy, and configures the ControllerManagedBy to respond to -// create / delete / update events by *reconciling the owner object*. This is the equivalent of calling -// Watches(&handler.EnqueueRequestForOwner{&source.Kind{Type: }, &handler.EnqueueRequestForOwner{OwnerType: apiType, IsController: true}) -func (blder *Builder) Owns(apiType runtime.Object) *Builder { - blder.managedObjects = append(blder.managedObjects, apiType) - return blder -} - -type watchRequest struct { - src source.Source - eventhandler handler.EventHandler -} - -// Watches exposes the lower-level ControllerManagedBy Watches functions through the builder. Consider using -// Owns or For instead of Watches directly. -func (blder *Builder) Watches(src source.Source, eventhandler handler.EventHandler) *Builder { - blder.watchRequest = append(blder.watchRequest, watchRequest{src: src, eventhandler: eventhandler}) - return blder -} - -// WithConfig sets the Config to use for configuring clients. 
Defaults to the in-cluster config or to ~/.kube/config. -// -// Deprecated: Use ControllerManagedBy(Manager) and this isn't needed. -func (blder *Builder) WithConfig(config *rest.Config) *Builder { - blder.config = config - return blder -} - -// WithManager sets the Manager to use for registering the ControllerManagedBy. Defaults to a new manager.Manager. -// -// Deprecated: Use ControllerManagedBy(Manager) and this isn't needed. -func (blder *Builder) WithManager(m manager.Manager) *Builder { - blder.mgr = m - return blder -} - -// WithEventFilter sets the event filters, to filter which create/update/delete/generic events eventually -// trigger reconciliations. For example, filtering on whether the resource version has changed. -// Defaults to the empty list. -func (blder *Builder) WithEventFilter(p predicate.Predicate) *Builder { - blder.predicates = append(blder.predicates, p) - return blder -} - -// Complete builds the Application ControllerManagedBy. -func (blder *Builder) Complete(r reconcile.Reconciler) error { - _, err := blder.Build(r) - return err -} - -// Build builds the Application ControllerManagedBy and returns the Manager used to start it. 
-// -// Deprecated: Use Complete -func (blder *Builder) Build(r reconcile.Reconciler) (manager.Manager, error) { - if r == nil { - return nil, fmt.Errorf("must provide a non-nil Reconciler") - } - - // Set the Config - if err := blder.doConfig(); err != nil { - return nil, err - } - - // Set the Manager - if err := blder.doManager(); err != nil { - return nil, err - } - - // Set the ControllerManagedBy - if err := blder.doController(r); err != nil { - return nil, err - } - - // Set the Webook if needed - if err := blder.doWebhook(); err != nil { - return nil, err - } - - // Set the Watch - if err := blder.doWatch(); err != nil { - return nil, err - } - - return blder.mgr, nil -} - -func (blder *Builder) doWatch() error { - // Reconcile type - src := &source.Kind{Type: blder.apiType} - hdler := &handler.EnqueueRequestForObject{} - err := blder.ctrl.Watch(src, hdler, blder.predicates...) - if err != nil { - return err - } - - // Watches the managed types - for _, obj := range blder.managedObjects { - src := &source.Kind{Type: obj} - hdler := &handler.EnqueueRequestForOwner{ - OwnerType: blder.apiType, - IsController: true, - } - if err := blder.ctrl.Watch(src, hdler, blder.predicates...); err != nil { - return err - } - } - - // Do the watch requests - for _, w := range blder.watchRequest { - if err := blder.ctrl.Watch(w.src, w.eventhandler, blder.predicates...); err != nil { - return err - } - - } - return nil -} - -func (blder *Builder) doConfig() error { - if blder.config != nil { - return nil - } - if blder.mgr != nil { - blder.config = blder.mgr.GetConfig() - return nil - } - var err error - blder.config, err = getConfig() - return err -} - -func (blder *Builder) doManager() error { - if blder.mgr != nil { - return nil - } - var err error - blder.mgr, err = newManager(blder.config, manager.Options{}) - return err -} - -func (blder *Builder) getControllerName() (string, error) { - gvk, err := getGvk(blder.apiType, blder.mgr.GetScheme()) - if err != nil { - return 
"", err - } - name := fmt.Sprintf("%s-application", strings.ToLower(gvk.Kind)) - return name, nil -} - -func (blder *Builder) doController(r reconcile.Reconciler) error { - name, err := blder.getControllerName() - if err != nil { - return err - } - blder.ctrl, err = newController(name, blder.mgr, controller.Options{Reconciler: r}) - return err -} - -func (blder *Builder) doWebhook() error { - // Create a webhook for each type - gvk, err := apiutil.GVKForObject(blder.apiType, blder.mgr.GetScheme()) - if err != nil { - return err - } - - // TODO: When the conversion webhook lands, we need to handle all registered versions of a given group-kind. - // A potential workflow for defaulting webhook - // 1) a bespoke (non-hub) version comes in - // 2) convert it to the hub version - // 3) do defaulting - // 4) convert it back to the same bespoke version - // 5) calculate the JSON patch - // - // A potential workflow for validating webhook - // 1) a bespoke (non-hub) version comes in - // 2) convert it to the hub version - // 3) do validation - if defaulter, isDefaulter := blder.apiType.(admission.Defaulter); isDefaulter { - mwh := admission.DefaultingWebhookFor(defaulter) - if mwh != nil { - path := generateMutatePath(gvk) - log.Info("Registering a mutating webhook", - "GVK", gvk, - "path", path) - - blder.mgr.GetWebhookServer().Register(path, mwh) - } - } - - if validator, isValidator := blder.apiType.(admission.Validator); isValidator { - vwh := admission.ValidatingWebhookFor(validator) - if vwh != nil { - path := generateValidatePath(gvk) - log.Info("Registering a validating webhook", - "GVK", gvk, - "path", path) - blder.mgr.GetWebhookServer().Register(path, vwh) - } - } - - err = conversion.CheckConvertibility(blder.mgr.GetScheme(), blder.apiType) - if err != nil { - log.Error(err, "conversion check failed", "GVK", gvk) - } - return nil -} - -func generateMutatePath(gvk schema.GroupVersionKind) string { - return "/mutate-" + strings.Replace(gvk.Group, ".", "-", -1) + 
"-" + - gvk.Version + "-" + strings.ToLower(gvk.Kind) -} - -func generateValidatePath(gvk schema.GroupVersionKind) string { - return "/validate-" + strings.Replace(gvk.Group, ".", "-", -1) + "-" + - gvk.Version + "-" + strings.ToLower(gvk.Kind) -} diff --git a/pkg/builder/build_test.go b/pkg/builder/build_test.go deleted file mode 100644 index 241f51c3f1..0000000000 --- a/pkg/builder/build_test.go +++ /dev/null @@ -1,630 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package builder - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/http/httptest" - "os" - "strings" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/scheme" - "sigs.k8s.io/controller-runtime/pkg/source" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" -) - -var _ = Describe("application", func() { - var stop chan struct{} - - BeforeEach(func() { - stop = make(chan struct{}) - getConfig = func() (*rest.Config, error) { return cfg, nil } - newController = controller.New - newManager = manager.New - }) - - AfterEach(func() { - close(stop) - }) - - noop := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) { return reconcile.Result{}, nil }) - - Describe("New", func() { - It("should return success if given valid objects", func() { - instance, err := SimpleController(). - For(&appsv1.ReplicaSet{}). - Owns(&appsv1.ReplicaSet{}). - Build(noop) - Expect(err).NotTo(HaveOccurred()) - Expect(instance).NotTo(BeNil()) - }) - - It("should return an error if the Config is invalid", func() { - getConfig = func() (*rest.Config, error) { return cfg, fmt.Errorf("expected error") } - instance, err := SimpleController(). - For(&appsv1.ReplicaSet{}). - Owns(&appsv1.ReplicaSet{}). - Build(noop) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("expected error")) - Expect(instance).To(BeNil()) - }) - - It("should return an error if there is no GVK for an object", func() { - instance, err := SimpleController(). - For(&fakeType{}). - Owns(&appsv1.ReplicaSet{}). 
- Build(noop) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("no kind is registered for the type builder.fakeType")) - Expect(instance).To(BeNil()) - - instance, err = SimpleController(). - For(&appsv1.ReplicaSet{}). - Owns(&fakeType{}). - Build(noop) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("no kind is registered for the type builder.fakeType")) - Expect(instance).To(BeNil()) - }) - - It("should return an error if it cannot create the manager", func() { - newManager = func(config *rest.Config, options manager.Options) (manager.Manager, error) { - return nil, fmt.Errorf("expected error") - } - instance, err := SimpleController(). - For(&appsv1.ReplicaSet{}). - Owns(&appsv1.ReplicaSet{}). - Build(noop) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("expected error")) - Expect(instance).To(BeNil()) - }) - - It("should return an error if it cannot create the controller", func() { - newController = func(name string, mgr manager.Manager, options controller.Options) ( - controller.Controller, error) { - return nil, fmt.Errorf("expected error") - } - instance, err := SimpleController(). - For(&appsv1.ReplicaSet{}). - Owns(&appsv1.ReplicaSet{}). - Build(noop) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("expected error")) - Expect(instance).To(BeNil()) - }) - - It("should scaffold a defaulting webhook if the type implements the Defaulter interface", func() { - By("creating a controller manager") - m, err := manager.New(cfg, manager.Options{}) - Expect(err).NotTo(HaveOccurred()) - - By("registering the type in the Scheme") - builder := scheme.Builder{GroupVersion: testDefaulterGVK.GroupVersion()} - builder.Register(&TestDefaulter{}, &TestDefaulterList{}) - err = builder.AddToScheme(m.GetScheme()) - Expect(err).NotTo(HaveOccurred()) - - instance, err := ControllerManagedBy(m). - For(&TestDefaulter{}). - Owns(&appsv1.ReplicaSet{}). 
- Build(noop) - Expect(err).NotTo(HaveOccurred()) - Expect(instance).NotTo(BeNil()) - svr := m.GetWebhookServer() - Expect(svr).NotTo(BeNil()) - - reader := strings.NewReader(`{ - "kind":"AdmissionReview", - "apiVersion":"admission.k8s.io/v1beta1", - "request":{ - "uid":"07e52e8d-4513-11e9-a716-42010a800270", - "kind":{ - "group":"", - "version":"v1", - "kind":"TestDefaulter" - }, - "resource":{ - "group":"", - "version":"v1", - "resource":"testdefaulter" - }, - "namespace":"default", - "operation":"CREATE", - "object":{ - "replica":1 - }, - "oldObject":null - } -}`) - - stopCh := make(chan struct{}) - close(stopCh) - // TODO: we may want to improve it to make it be able to inject dependencies, - // but not always try to load certs and return not found error. - err = svr.Start(stopCh) - if err != nil && !os.IsNotExist(err) { - Expect(err).NotTo(HaveOccurred()) - } - - By("sending a request to a mutating webhook path") - path := generateMutatePath(testDefaulterGVK) - req := httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) - req.Header.Add(http.CanonicalHeaderKey("Content-Type"), "application/json") - w := httptest.NewRecorder() - svr.WebhookMux.ServeHTTP(w, req) - Expect(w.Code).To(Equal(http.StatusOK)) - By("sanity checking the response contains reasonable fields") - Expect(w.Body).To(ContainSubstring(`"allowed":true`)) - Expect(w.Body).To(ContainSubstring(`"patch":`)) - Expect(w.Body).To(ContainSubstring(`"code":200`)) - - By("sending a request to a validating webhook path that doesn't exist") - path = generateValidatePath(testDefaulterGVK) - req = httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) - req.Header.Add(http.CanonicalHeaderKey("Content-Type"), "application/json") - w = httptest.NewRecorder() - svr.WebhookMux.ServeHTTP(w, req) - Expect(w.Code).To(Equal(http.StatusNotFound)) - }) - - It("should scaffold a validating webhook if the type implements the Validator interface", func() { - By("creating a controller 
manager") - m, err := manager.New(cfg, manager.Options{}) - Expect(err).NotTo(HaveOccurred()) - - By("registering the type in the Scheme") - builder := scheme.Builder{GroupVersion: testValidatorGVK.GroupVersion()} - builder.Register(&TestValidator{}, &TestValidatorList{}) - err = builder.AddToScheme(m.GetScheme()) - Expect(err).NotTo(HaveOccurred()) - - instance, err := ControllerManagedBy(m). - For(&TestValidator{}). - Owns(&appsv1.ReplicaSet{}). - Build(noop) - Expect(err).NotTo(HaveOccurred()) - Expect(instance).NotTo(BeNil()) - svr := m.GetWebhookServer() - Expect(svr).NotTo(BeNil()) - - reader := strings.NewReader(`{ - "kind":"AdmissionReview", - "apiVersion":"admission.k8s.io/v1beta1", - "request":{ - "uid":"07e52e8d-4513-11e9-a716-42010a800270", - "kind":{ - "group":"", - "version":"v1", - "kind":"TestValidator" - }, - "resource":{ - "group":"", - "version":"v1", - "resource":"testvalidator" - }, - "namespace":"default", - "operation":"UPDATE", - "object":{ - "replica":1 - }, - "oldObject":{ - "replica":2 - } - } -}`) - - stopCh := make(chan struct{}) - close(stopCh) - // TODO: we may want to improve it to make it be able to inject dependencies, - // but not always try to load certs and return not found error. 
- err = svr.Start(stopCh) - if err != nil && !os.IsNotExist(err) { - Expect(err).NotTo(HaveOccurred()) - } - - By("sending a request to a mutating webhook path that doesn't exist") - path := generateMutatePath(testValidatorGVK) - req := httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) - req.Header.Add(http.CanonicalHeaderKey("Content-Type"), "application/json") - w := httptest.NewRecorder() - svr.WebhookMux.ServeHTTP(w, req) - Expect(w.Code).To(Equal(http.StatusNotFound)) - - By("sending a request to a validating webhook path") - path = generateValidatePath(testValidatorGVK) - req = httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) - req.Header.Add(http.CanonicalHeaderKey("Content-Type"), "application/json") - w = httptest.NewRecorder() - svr.WebhookMux.ServeHTTP(w, req) - Expect(w.Code).To(Equal(http.StatusOK)) - By("sanity checking the response contains reasonable field") - Expect(w.Body).To(ContainSubstring(`"allowed":false`)) - Expect(w.Body).To(ContainSubstring(`"code":403`)) - }) - - It("should scaffold defaulting and validating webhooks if the type implements both Defaulter and Validator interfaces", func() { - By("creating a controller manager") - m, err := manager.New(cfg, manager.Options{}) - Expect(err).NotTo(HaveOccurred()) - - By("registering the type in the Scheme") - builder := scheme.Builder{GroupVersion: testDefaultValidatorGVK.GroupVersion()} - builder.Register(&TestDefaultValidator{}, &TestDefaultValidatorList{}) - err = builder.AddToScheme(m.GetScheme()) - Expect(err).NotTo(HaveOccurred()) - - instance, err := ControllerManagedBy(m). - For(&TestDefaultValidator{}). - Owns(&appsv1.ReplicaSet{}). 
- Build(noop) - Expect(err).NotTo(HaveOccurred()) - Expect(instance).NotTo(BeNil()) - svr := m.GetWebhookServer() - Expect(svr).NotTo(BeNil()) - - reader := strings.NewReader(`{ - "kind":"AdmissionReview", - "apiVersion":"admission.k8s.io/v1beta1", - "request":{ - "uid":"07e52e8d-4513-11e9-a716-42010a800270", - "kind":{ - "group":"", - "version":"v1", - "kind":"TestDefaultValidator" - }, - "resource":{ - "group":"", - "version":"v1", - "resource":"testdefaultvalidator" - }, - "namespace":"default", - "operation":"CREATE", - "object":{ - "replica":1 - }, - "oldObject":null - } -}`) - - stopCh := make(chan struct{}) - close(stopCh) - // TODO: we may want to improve it to make it be able to inject dependencies, - // but not always try to load certs and return not found error. - err = svr.Start(stopCh) - if err != nil && !os.IsNotExist(err) { - Expect(err).NotTo(HaveOccurred()) - } - - By("sending a request to a mutating webhook path") - path := generateMutatePath(testDefaultValidatorGVK) - req := httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) - req.Header.Add(http.CanonicalHeaderKey("Content-Type"), "application/json") - w := httptest.NewRecorder() - svr.WebhookMux.ServeHTTP(w, req) - Expect(w.Code).To(Equal(http.StatusOK)) - By("sanity checking the response contains reasonable field") - Expect(w.Body).To(ContainSubstring(`"allowed":true`)) - Expect(w.Body).To(ContainSubstring(`"patch":`)) - Expect(w.Body).To(ContainSubstring(`"code":200`)) - - By("sending a request to a validating webhook path") - path = generateValidatePath(testDefaultValidatorGVK) - req = httptest.NewRequest("POST", "http://svc-name.svc-ns.svc"+path, reader) - req.Header.Add(http.CanonicalHeaderKey("Content-Type"), "application/json") - w = httptest.NewRecorder() - svr.WebhookMux.ServeHTTP(w, req) - Expect(w.Code).To(Equal(http.StatusOK)) - By("sanity checking the response contains reasonable field") - Expect(w.Body).To(ContainSubstring(`"allowed":true`)) - 
Expect(w.Body).To(ContainSubstring(`"code":200`)) - }) - }) - - Describe("Start with SimpleController", func() { - It("should Reconcile Owns objects", func(done Done) { - bldr := SimpleController(). - ForType(&appsv1.Deployment{}). - WithConfig(cfg). - Owns(&appsv1.ReplicaSet{}) - doReconcileTest("1", stop, bldr, nil, false) - - close(done) - }, 10) - - It("should Reconcile Owns objects with a Manager", func(done Done) { - m, err := manager.New(cfg, manager.Options{}) - Expect(err).NotTo(HaveOccurred()) - - bldr := SimpleController(). - WithManager(m). - For(&appsv1.Deployment{}). - Owns(&appsv1.ReplicaSet{}) - doReconcileTest("2", stop, bldr, m, false) - close(done) - }, 10) - }) - - Describe("Start with ControllerManagedBy", func() { - It("should Reconcile Owns objects", func(done Done) { - m, err := manager.New(cfg, manager.Options{}) - Expect(err).NotTo(HaveOccurred()) - - bldr := ControllerManagedBy(m). - For(&appsv1.Deployment{}). - Owns(&appsv1.ReplicaSet{}) - doReconcileTest("3", stop, bldr, m, false) - close(done) - }, 10) - - It("should Reconcile Watches objects", func(done Done) { - m, err := manager.New(cfg, manager.Options{}) - Expect(err).NotTo(HaveOccurred()) - - bldr := ControllerManagedBy(m). - For(&appsv1.Deployment{}). - Watches( // Equivalent of Owns - &source.Kind{Type: &appsv1.ReplicaSet{}}, - &handler.EnqueueRequestForOwner{OwnerType: &appsv1.Deployment{}, IsController: true}) - doReconcileTest("4", stop, bldr, m, true) - close(done) - }, 10) - }) -}) - -func doReconcileTest(nameSuffix string, stop chan struct{}, blder *Builder, mgr manager.Manager, complete bool) { - deployName := "deploy-name-" + nameSuffix - rsName := "rs-name-" + nameSuffix - - By("Creating the application") - ch := make(chan reconcile.Request) - fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) { - defer GinkgoRecover() - if !strings.HasSuffix(req.Name, nameSuffix) { - // From different test, ignore this request. Etcd is shared across tests. 
- return reconcile.Result{}, nil - } - ch <- req - return reconcile.Result{}, nil - }) - - instance := mgr - if complete { - err := blder.Complete(fn) - Expect(err).NotTo(HaveOccurred()) - } else { - var err error - instance, err = blder.Build(fn) - Expect(err).NotTo(HaveOccurred()) - } - - // Manager should match - if mgr != nil { - Expect(instance).To(Equal(mgr)) - } - - By("Starting the application") - go func() { - defer GinkgoRecover() - Expect(instance.Start(stop)).NotTo(HaveOccurred()) - By("Stopping the application") - }() - - By("Creating a Deployment") - // Expect a Reconcile when the Deployment is managedObjects. - dep := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: deployName, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"foo": "bar"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "nginx", - Image: "nginx", - }, - }, - }, - }, - }, - } - err := instance.GetClient().Create(context.TODO(), dep) - Expect(err).NotTo(HaveOccurred()) - - By("Waiting for the Deployment Reconcile") - Expect(<-ch).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: "default", Name: deployName}})) - - By("Creating a ReplicaSet") - // Expect a Reconcile when an Owned object is managedObjects. 
- t := true - rs := &appsv1.ReplicaSet{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: rsName, - Labels: dep.Spec.Selector.MatchLabels, - OwnerReferences: []metav1.OwnerReference{ - { - Name: deployName, - Kind: "Deployment", - APIVersion: "apps/v1", - Controller: &t, - UID: dep.UID, - }, - }, - }, - Spec: appsv1.ReplicaSetSpec{ - Selector: dep.Spec.Selector, - Template: dep.Spec.Template, - }, - } - err = instance.GetClient().Create(context.TODO(), rs) - Expect(err).NotTo(HaveOccurred()) - - By("Waiting for the ReplicaSet Reconcile") - Expect(<-ch).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: "default", Name: deployName}})) - -} - -var _ runtime.Object = &fakeType{} - -type fakeType struct{} - -func (*fakeType) GetObjectKind() schema.ObjectKind { return nil } -func (*fakeType) DeepCopyObject() runtime.Object { return nil } - -// TestDefaulter -var _ runtime.Object = &TestDefaulter{} - -type TestDefaulter struct { - Replica int `json:"replica,omitempty"` -} - -var testDefaulterGVK = schema.GroupVersionKind{Group: "foo.test.org", Version: "v1", Kind: "TestDefaulter"} - -func (*TestDefaulter) GetObjectKind() schema.ObjectKind { return nil } -func (d *TestDefaulter) DeepCopyObject() runtime.Object { - return &TestDefaulter{ - Replica: d.Replica, - } -} - -var _ runtime.Object = &TestDefaulterList{} - -type TestDefaulterList struct{} - -func (*TestDefaulterList) GetObjectKind() schema.ObjectKind { return nil } -func (*TestDefaulterList) DeepCopyObject() runtime.Object { return nil } - -func (d *TestDefaulter) Default() { - if d.Replica < 2 { - d.Replica = 2 - } -} - -// TestValidator -var _ runtime.Object = &TestValidator{} - -type TestValidator struct { - Replica int `json:"replica,omitempty"` -} - -var testValidatorGVK = schema.GroupVersionKind{Group: "foo.test.org", Version: "v1", Kind: "TestValidator"} - -func (*TestValidator) GetObjectKind() schema.ObjectKind { return nil } -func (v *TestValidator) 
DeepCopyObject() runtime.Object { - return &TestValidator{ - Replica: v.Replica, - } -} - -var _ runtime.Object = &TestValidatorList{} - -type TestValidatorList struct{} - -func (*TestValidatorList) GetObjectKind() schema.ObjectKind { return nil } -func (*TestValidatorList) DeepCopyObject() runtime.Object { return nil } - -var _ admission.Validator = &TestValidator{} - -func (v *TestValidator) ValidateCreate() error { - if v.Replica < 0 { - return errors.New("number of replica should be greater than or equal to 0") - } - return nil -} - -func (v *TestValidator) ValidateUpdate(old runtime.Object) error { - if v.Replica < 0 { - return errors.New("number of replica should be greater than or equal to 0") - } - if oldObj, ok := old.(*TestValidator); !ok { - return fmt.Errorf("the old object is expected to be %T", oldObj) - } else if v.Replica < oldObj.Replica { - return fmt.Errorf("new replica %v should not be fewer than old replica %v", v.Replica, oldObj.Replica) - } - return nil -} - -// TestDefaultValidator -var _ runtime.Object = &TestDefaultValidator{} - -type TestDefaultValidator struct { - Replica int `json:"replica,omitempty"` -} - -var testDefaultValidatorGVK = schema.GroupVersionKind{Group: "foo.test.org", Version: "v1", Kind: "TestDefaultValidator"} - -func (*TestDefaultValidator) GetObjectKind() schema.ObjectKind { return nil } -func (dv *TestDefaultValidator) DeepCopyObject() runtime.Object { - return &TestDefaultValidator{ - Replica: dv.Replica, - } -} - -var _ runtime.Object = &TestDefaultValidatorList{} - -type TestDefaultValidatorList struct{} - -func (*TestDefaultValidatorList) GetObjectKind() schema.ObjectKind { return nil } -func (*TestDefaultValidatorList) DeepCopyObject() runtime.Object { return nil } - -func (dv *TestDefaultValidator) Default() { - if dv.Replica < 2 { - dv.Replica = 2 - } -} - -var _ admission.Validator = &TestDefaultValidator{} - -func (dv *TestDefaultValidator) ValidateCreate() error { - if dv.Replica < 0 { - return 
errors.New("number of replica should be greater than or equal to 0") - } - return nil -} - -func (dv *TestDefaultValidator) ValidateUpdate(old runtime.Object) error { - if dv.Replica < 0 { - return errors.New("number of replica should be greater than or equal to 0") - } - return nil -} diff --git a/pkg/builder/builder_suite_test.go b/pkg/builder/builder_suite_test.go index 1209bfe9f3..11f5b8684d 100644 --- a/pkg/builder/builder_suite_test.go +++ b/pkg/builder/builder_suite_test.go @@ -19,32 +19,32 @@ package builder import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/testing_frameworks/integration/addr" ) func TestBuilder(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "application Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "application Suite") } var testenv *envtest.Environment var cfg *rest.Config -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) testenv = &envtest.Environment{} addCRDToEnvironment(testenv, @@ -57,52 +57,55 @@ var _ = BeforeSuite(func(done Done) { Expect(err).NotTo(HaveOccurred()) 
// Prevent the metrics listener being created - metrics.DefaultBindAddress = "0" + metricsserver.DefaultBindAddress = "0" - webhook.DefaultPort, _, err = addr.Suggest() + webhook.DefaultPort, _, err = addr.Suggest("") Expect(err).NotTo(HaveOccurred()) - - close(done) -}, 60) +}) var _ = AfterSuite(func() { Expect(testenv.Stop()).To(Succeed()) // Put the DefaultBindAddress back - metrics.DefaultBindAddress = ":8080" + metricsserver.DefaultBindAddress = ":8080" // Change the webhook.DefaultPort back to the original default. - webhook.DefaultPort = 443 + webhook.DefaultPort = 9443 }) func addCRDToEnvironment(env *envtest.Environment, gvks ...schema.GroupVersionKind) { for _, gvk := range gvks { - plural, singlar := meta.UnsafeGuessKindToResource(gvk) - crd := &apiextensionsv1beta1.CustomResourceDefinition{ + plural, singular := meta.UnsafeGuessKindToResource(gvk) + crd := &apiextensionsv1.CustomResourceDefinition{ TypeMeta: metav1.TypeMeta{ - APIVersion: "apiextensions.k8s.io", + APIVersion: "apiextensions.k8s.io/v1", Kind: "CustomResourceDefinition", }, ObjectMeta: metav1.ObjectMeta{ Name: plural.Resource + "." 
+ gvk.Group, }, - Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ - Group: gvk.Group, - Version: gvk.Version, - Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: gvk.Group, + Names: apiextensionsv1.CustomResourceDefinitionNames{ Plural: plural.Resource, - Singular: singlar.Resource, + Singular: singular.Resource, Kind: gvk.Kind, }, - Versions: []apiextensionsv1beta1.CustomResourceDefinitionVersion{ + Scope: apiextensionsv1.NamespaceScoped, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ { Name: gvk.Version, Served: true, Storage: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, }, }, }, } - env.CRDs = append(env.CRDs, crd) + env.CRDInstallOptions.CRDs = append(env.CRDInstallOptions.CRDs, crd) } } diff --git a/pkg/builder/controller.go b/pkg/builder/controller.go new file mode 100644 index 0000000000..6d906f6e52 --- /dev/null +++ b/pkg/builder/controller.go @@ -0,0 +1,466 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package builder + +import ( + "errors" + "fmt" + "reflect" + "strings" + + "github.com/go-logr/logr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +// project represents other forms that we can use to +// send/receive a given resource (metadata-only, unstructured, etc). +type objectProjection int + +const ( + // projectAsNormal doesn't change the object from the form given. + projectAsNormal objectProjection = iota + // projectAsMetadata turns this into a metadata-only watch. + projectAsMetadata +) + +// Builder builds a Controller. +type Builder = TypedBuilder[reconcile.Request] + +// TypedBuilder builds a Controller. The request is the request type +// that is passed to the workqueue and then to the Reconciler. +// The workqueue de-duplicates identical requests. +type TypedBuilder[request comparable] struct { + forInput ForInput + ownsInput []OwnsInput + rawSources []source.TypedSource[request] + watchesInput []WatchesInput[request] + mgr manager.Manager + globalPredicates []predicate.Predicate + ctrl controller.TypedController[request] + ctrlOptions controller.TypedOptions[request] + name string + newController func(name string, mgr manager.Manager, options controller.TypedOptions[request]) (controller.TypedController[request], error) +} + +// ControllerManagedBy returns a new controller builder that will be started by the provided Manager. 
+func ControllerManagedBy(m manager.Manager) *Builder { + return TypedControllerManagedBy[reconcile.Request](m) +} + +// TypedControllerManagedBy returns a new typed controller builder that will be started by the provided Manager. +func TypedControllerManagedBy[request comparable](m manager.Manager) *TypedBuilder[request] { + return &TypedBuilder[request]{mgr: m} +} + +// ForInput represents the information set by the For method. +type ForInput struct { + object client.Object + predicates []predicate.Predicate + objectProjection objectProjection + err error +} + +// For defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete / +// update events by *reconciling the object*. +// +// This is the equivalent of calling +// Watches(source.Kind(cache, &Type{}, &handler.EnqueueRequestForObject{})). +func (blder *TypedBuilder[request]) For(object client.Object, opts ...ForOption) *TypedBuilder[request] { + if blder.forInput.object != nil { + blder.forInput.err = fmt.Errorf("For(...) should only be called once, could not assign multiple objects for reconciliation") + return blder + } + input := ForInput{object: object} + for _, opt := range opts { + opt.ApplyToFor(&input) + } + + blder.forInput = input + return blder +} + +// OwnsInput represents the information set by Owns method. +type OwnsInput struct { + matchEveryOwner bool + object client.Object + predicates []predicate.Predicate + objectProjection objectProjection +} + +// Owns defines types of Objects being *generated* by the ControllerManagedBy, and configures the ControllerManagedBy to respond to +// create / delete / update events by *reconciling the owner object*. +// +// The default behavior reconciles only the first controller-type OwnerReference of the given type. +// Use Owns(object, builder.MatchEveryOwner) to reconcile all owners. 
+// +// By default, this is the equivalent of calling +// Watches(source.Kind(cache, &Type{}, handler.EnqueueRequestForOwner([...], &OwnerType{}, OnlyControllerOwner()))). +func (blder *TypedBuilder[request]) Owns(object client.Object, opts ...OwnsOption) *TypedBuilder[request] { + input := OwnsInput{object: object} + for _, opt := range opts { + opt.ApplyToOwns(&input) + } + + blder.ownsInput = append(blder.ownsInput, input) + return blder +} + +type untypedWatchesInput interface { + setPredicates([]predicate.Predicate) + setObjectProjection(objectProjection) +} + +// WatchesInput represents the information set by Watches method. +type WatchesInput[request comparable] struct { + obj client.Object + handler handler.TypedEventHandler[client.Object, request] + predicates []predicate.Predicate + objectProjection objectProjection +} + +func (w *WatchesInput[request]) setPredicates(predicates []predicate.Predicate) { + w.predicates = predicates +} + +func (w *WatchesInput[request]) setObjectProjection(objectProjection objectProjection) { + w.objectProjection = objectProjection +} + +// Watches defines the type of Object to watch, and configures the ControllerManagedBy to respond to create / delete / +// update events by *reconciling the object* with the given EventHandler. +// +// This is the equivalent of calling +// WatchesRawSource(source.Kind(cache, object, eventHandler, predicates...)). +func (blder *TypedBuilder[request]) Watches( + object client.Object, + eventHandler handler.TypedEventHandler[client.Object, request], + opts ...WatchesOption, +) *TypedBuilder[request] { + input := WatchesInput[request]{ + obj: object, + handler: eventHandler, + } + for _, opt := range opts { + opt.ApplyToWatches(&input) + } + + blder.watchesInput = append(blder.watchesInput, input) + + return blder +} + +// WatchesMetadata is the same as Watches, but forces the internal cache to only watch PartialObjectMetadata. 
+// +// This is useful when watching lots of objects, really big objects, or objects for which you only know +// the GVK, but not the structure. You'll need to pass metav1.PartialObjectMetadata to the client +// when fetching objects in your reconciler, otherwise you'll end up with a duplicate structured or unstructured cache. +// +// When watching a resource with metadata only, for example the v1.Pod, you should not Get and List using the v1.Pod type. +// Instead, you should use the special metav1.PartialObjectMetadata type. +// +// ❌ Incorrect: +// +// pod := &v1.Pod{} +// mgr.GetClient().Get(ctx, nsAndName, pod) +// +// ✅ Correct: +// +// pod := &metav1.PartialObjectMetadata{} +// pod.SetGroupVersionKind(schema.GroupVersionKind{ +// Group: "", +// Version: "v1", +// Kind: "Pod", +// }) +// mgr.GetClient().Get(ctx, nsAndName, pod) +// +// In the first case, controller-runtime will create another cache for the +// concrete type on top of the metadata cache; this increases memory +// consumption and leads to race conditions as caches are not in sync. +func (blder *TypedBuilder[request]) WatchesMetadata( + object client.Object, + eventHandler handler.TypedEventHandler[client.Object, request], + opts ...WatchesOption, +) *TypedBuilder[request] { + opts = append(opts, OnlyMetadata) + return blder.Watches(object, eventHandler, opts...) +} + +// WatchesRawSource exposes the lower-level ControllerManagedBy Watches functions through the builder. +// +// WatchesRawSource does not respect predicates configured through WithEventFilter. +// +// WatchesRawSource makes it possible to use typed handlers and predicates with `source.Kind` as well as custom source implementations. 
+func (blder *TypedBuilder[request]) WatchesRawSource(src source.TypedSource[request]) *TypedBuilder[request] { + blder.rawSources = append(blder.rawSources, src) + + return blder +} + +// WithEventFilter sets the event filters, to filter which create/update/delete/generic events eventually +// trigger reconciliations. For example, filtering on whether the resource version has changed. +// Given predicate is added for all watched objects and thus must be able to deal with the type +// of all watched objects. +// +// Defaults to the empty list. +func (blder *TypedBuilder[request]) WithEventFilter(p predicate.Predicate) *TypedBuilder[request] { + blder.globalPredicates = append(blder.globalPredicates, p) + return blder +} + +// WithOptions overrides the controller options used in doController. Defaults to empty. +func (blder *TypedBuilder[request]) WithOptions(options controller.TypedOptions[request]) *TypedBuilder[request] { + blder.ctrlOptions = options + return blder +} + +// WithLogConstructor overrides the controller options's LogConstructor. +func (blder *TypedBuilder[request]) WithLogConstructor(logConstructor func(*request) logr.Logger) *TypedBuilder[request] { + blder.ctrlOptions.LogConstructor = logConstructor + return blder +} + +// Named sets the name of the controller to the given name. The name shows up +// in metrics, among other things, and thus should be a prometheus compatible name +// (underscores and alphanumeric characters only). +// +// By default, controllers are named using the lowercase version of their kind. +// +// The name must be unique as it is used to identify the controller in metrics and logs. +func (blder *TypedBuilder[request]) Named(name string) *TypedBuilder[request] { + blder.name = name + return blder +} + +// Complete builds the Application Controller. 
+func (blder *TypedBuilder[request]) Complete(r reconcile.TypedReconciler[request]) error { + _, err := blder.Build(r) + return err +} + +// Build builds the Application Controller and returns the Controller it created. +func (blder *TypedBuilder[request]) Build(r reconcile.TypedReconciler[request]) (controller.TypedController[request], error) { + if r == nil { + return nil, fmt.Errorf("must provide a non-nil Reconciler") + } + if blder.mgr == nil { + return nil, fmt.Errorf("must provide a non-nil Manager") + } + if blder.forInput.err != nil { + return nil, blder.forInput.err + } + + // Set the ControllerManagedBy + if err := blder.doController(r); err != nil { + return nil, err + } + + // Set the Watch + if err := blder.doWatch(); err != nil { + return nil, err + } + + return blder.ctrl, nil +} + +func (blder *TypedBuilder[request]) project(obj client.Object, proj objectProjection) (client.Object, error) { + switch proj { + case projectAsNormal: + return obj, nil + case projectAsMetadata: + metaObj := &metav1.PartialObjectMetadata{} + gvk, err := apiutil.GVKForObject(obj, blder.mgr.GetScheme()) + if err != nil { + return nil, fmt.Errorf("unable to determine GVK of %T for a metadata-only watch: %w", obj, err) + } + metaObj.SetGroupVersionKind(gvk) + return metaObj, nil + default: + panic(fmt.Sprintf("unexpected projection type %v on type %T, should not be possible since this is an internal field", proj, obj)) + } +} + +func (blder *TypedBuilder[request]) doWatch() error { + // Reconcile type + if blder.forInput.object != nil { + obj, err := blder.project(blder.forInput.object, blder.forInput.objectProjection) + if err != nil { + return err + } + + if reflect.TypeFor[request]() != reflect.TypeOf(reconcile.Request{}) { + return fmt.Errorf("For() can only be used with reconcile.Request, got %T", *new(request)) + } + + var hdler handler.TypedEventHandler[client.Object, request] + reflect.ValueOf(&hdler).Elem().Set(reflect.ValueOf(&handler.EnqueueRequestForObject{})) + 
allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) + allPredicates = append(allPredicates, blder.forInput.predicates...) + src := source.TypedKind(blder.mgr.GetCache(), obj, hdler, allPredicates...) + if err := blder.ctrl.Watch(src); err != nil { + return err + } + } + + // Watches the managed types + if len(blder.ownsInput) > 0 && blder.forInput.object == nil { + return errors.New("Owns() can only be used together with For()") + } + for _, own := range blder.ownsInput { + obj, err := blder.project(own.object, own.objectProjection) + if err != nil { + return err + } + opts := []handler.OwnerOption{} + if !own.matchEveryOwner { + opts = append(opts, handler.OnlyControllerOwner()) + } + + var hdler handler.TypedEventHandler[client.Object, request] + reflect.ValueOf(&hdler).Elem().Set(reflect.ValueOf(handler.EnqueueRequestForOwner( + blder.mgr.GetScheme(), blder.mgr.GetRESTMapper(), + blder.forInput.object, + opts..., + ))) + allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) + allPredicates = append(allPredicates, own.predicates...) + src := source.TypedKind(blder.mgr.GetCache(), obj, hdler, allPredicates...) + if err := blder.ctrl.Watch(src); err != nil { + return err + } + } + + // Do the watch requests + if len(blder.watchesInput) == 0 && blder.forInput.object == nil && len(blder.rawSources) == 0 { + return errors.New("there are no watches configured, controller will never get triggered. Use For(), Owns(), Watches() or WatchesRawSource() to set them up") + } + for _, w := range blder.watchesInput { + projected, err := blder.project(w.obj, w.objectProjection) + if err != nil { + return fmt.Errorf("failed to project for %T: %w", w.obj, err) + } + allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) + allPredicates = append(allPredicates, w.predicates...) 
+ if err := blder.ctrl.Watch(source.TypedKind(blder.mgr.GetCache(), projected, w.handler, allPredicates...)); err != nil { + return err + } + } + for _, src := range blder.rawSources { + if err := blder.ctrl.Watch(src); err != nil { + return err + } + } + return nil +} + +func (blder *TypedBuilder[request]) getControllerName(gvk schema.GroupVersionKind, hasGVK bool) (string, error) { + if blder.name != "" { + return blder.name, nil + } + if !hasGVK { + return "", errors.New("one of For() or Named() must be called") + } + return strings.ToLower(gvk.Kind), nil +} + +func (blder *TypedBuilder[request]) doController(r reconcile.TypedReconciler[request]) error { + globalOpts := blder.mgr.GetControllerOptions() + + ctrlOptions := blder.ctrlOptions + if ctrlOptions.Reconciler != nil && r != nil { + return errors.New("reconciler was set via WithOptions() and via Build() or Complete()") + } + if ctrlOptions.Reconciler == nil { + ctrlOptions.Reconciler = r + } + + // Retrieve the GVK from the object we're reconciling + // to pre-populate logger information, and to optionally generate a default name. + var gvk schema.GroupVersionKind + hasGVK := blder.forInput.object != nil + if hasGVK { + var err error + gvk, err = apiutil.GVKForObject(blder.forInput.object, blder.mgr.GetScheme()) + if err != nil { + return err + } + } + + // Setup concurrency. + if ctrlOptions.MaxConcurrentReconciles == 0 && hasGVK { + groupKind := gvk.GroupKind().String() + + if concurrency, ok := globalOpts.GroupKindConcurrency[groupKind]; ok && concurrency > 0 { + ctrlOptions.MaxConcurrentReconciles = concurrency + } + } + + // Setup cache sync timeout. + if ctrlOptions.CacheSyncTimeout == 0 && globalOpts.CacheSyncTimeout > 0 { + ctrlOptions.CacheSyncTimeout = globalOpts.CacheSyncTimeout + } + + controllerName, err := blder.getControllerName(gvk, hasGVK) + if err != nil { + return err + } + + // Setup the logger. 
+ if ctrlOptions.LogConstructor == nil { + log := blder.mgr.GetLogger().WithValues( + "controller", controllerName, + ) + if hasGVK { + log = log.WithValues( + "controllerGroup", gvk.Group, + "controllerKind", gvk.Kind, + ) + } + + ctrlOptions.LogConstructor = func(in *request) logr.Logger { + log := log + + if req, ok := any(in).(*reconcile.Request); ok && req != nil { + if hasGVK { + log = log.WithValues(gvk.Kind, klog.KRef(req.Namespace, req.Name)) + } + log = log.WithValues( + "namespace", req.Namespace, "name", req.Name, + ) + } + return log + } + } + + if blder.newController == nil { + blder.newController = controller.NewTyped[request] + } + + // Build the controller and return. + blder.ctrl, err = blder.newController(controllerName, blder.mgr, ctrlOptions) + return err +} diff --git a/pkg/builder/controller_test.go b/pkg/builder/controller_test.go new file mode 100644 index 0000000000..46e937d590 --- /dev/null +++ b/pkg/builder/controller_test.go @@ -0,0 +1,740 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "context" + "fmt" + "strings" + "sync/atomic" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/config" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/scheme" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var _ untypedWatchesInput = (*WatchesInput[struct{}])(nil) + +type testLogger struct { + logr.Logger +} + +func (l *testLogger) Init(logr.RuntimeInfo) { +} + +func (l *testLogger) Enabled(int) bool { + return true +} + +func (l *testLogger) Info(level int, msg string, keysAndValues ...interface{}) { +} + +func (l *testLogger) WithValues(keysAndValues ...interface{}) logr.LogSink { + return l +} + +func (l *testLogger) WithName(name string) logr.LogSink { + return l +} + +type empty struct{} + +var _ = Describe("application", func() { + noop := reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + return reconcile.Result{}, nil + }) + typedNoop := reconcile.TypedFunc[empty](func(context.Context, empty) (reconcile.Result, error) { + return reconcile.Result{}, nil + }) + + Describe("New", func() { + It("should return success if given valid objects", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). + Owns(&appsv1.ReplicaSet{}). 
+ Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(instance).NotTo(BeNil()) + }) + + It("should return error if given two apiType objects in For function", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). + For(&appsv1.Deployment{}). + Owns(&appsv1.ReplicaSet{}). + Build(noop) + Expect(err).To(MatchError(ContainSubstring("For(...) should only be called once, could not assign multiple objects for reconciliation"))) + Expect(instance).To(BeNil()) + }) + + It("should return an error if For and Named function are not called", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). + Watches(&appsv1.ReplicaSet{}, &handler.EnqueueRequestForObject{}). + Build(noop) + Expect(err).To(MatchError(ContainSubstring("one of For() or Named() must be called"))) + Expect(instance).To(BeNil()) + }) + + It("should return an error when using Owns without For", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). + Named("my_controller"). + Owns(&appsv1.ReplicaSet{}). + Build(noop) + Expect(err).To(MatchError(ContainSubstring("Owns() can only be used together with For()"))) + Expect(instance).To(BeNil()) + + }) + + It("should return an error when there are no watches", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). + Named("my_new_controller"). + Build(noop) + Expect(err).To(MatchError(ContainSubstring("there are no watches configured, controller will never get triggered. 
Use For(), Owns(), Watches() or WatchesRawSource() to set them up"))) + Expect(instance).To(BeNil()) + }) + + It("should allow creating a controller without calling For", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := ControllerManagedBy(m). + Named("my_other_controller"). + Watches(&appsv1.ReplicaSet{}, &handler.EnqueueRequestForObject{}). + Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(instance).NotTo(BeNil()) + }) + + It("should return an error if there is no GVK for an object, and thus we can't default the controller name", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a controller with a bad For type") + instance, err := ControllerManagedBy(m). + For(&fakeType{}). + Owns(&appsv1.ReplicaSet{}). + Build(noop) + Expect(err).To(MatchError(ContainSubstring("no kind is registered for the type builder.fakeType"))) + Expect(instance).To(BeNil()) + + // NB(directxman12): we don't test non-for types, since errors for + // them now manifest on controller.Start, not controller.Watch. Errors on the For type + // manifest when we try to default the controller name, which is good to double check. + }) + + It("should return error if in For is used with a custom request type", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := TypedControllerManagedBy[empty](m). + For(&appsv1.ReplicaSet{}). + Named("last_controller"). 
+ Build(typedNoop) + Expect(err).To(MatchError(ContainSubstring("For() can only be used with reconcile.Request, got builder.empty"))) + Expect(instance).To(BeNil()) + }) + + It("should return error if in Owns is used with a custom request type", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := TypedControllerManagedBy[empty](m). + Named("my_controller-0"). + Owns(&appsv1.ReplicaSet{}). + Build(typedNoop) + // If we ever allow Owns() without For() we need to update the code to error + // out on Owns() if the request type is different from reconcile.Request just + // like we do in For(). + Expect(err).To(MatchError("Owns() can only be used together with For()")) + Expect(instance).To(BeNil()) + }) + + It("should build a controller with a custom request type", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + instance, err := TypedControllerManagedBy[empty](m). + Named("my_controller-1"). + WatchesRawSource( + source.TypedKind( + m.GetCache(), + &appsv1.ReplicaSet{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, rs *appsv1.ReplicaSet) []empty { + return []empty{{}} + }), + ), + ). + Build(typedNoop) + Expect(err).NotTo(HaveOccurred()) + Expect(instance).NotTo(BeNil()) + }) + + It("should return an error if it cannot create the controller", func() { + + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + builder := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). 
+ Owns(&appsv1.ReplicaSet{}) + builder.newController = func(name string, mgr manager.Manager, options controller.Options) ( + controller.Controller, error) { + return nil, fmt.Errorf("expected error") + } + instance, err := builder.Build(noop) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("expected error")) + Expect(instance).To(BeNil()) + }) + + It("should override max concurrent reconcilers during creation of controller", func() { + const maxConcurrentReconciles = 5 + newController := func(name string, mgr manager.Manager, options controller.Options) ( + controller.Controller, error) { + if options.MaxConcurrentReconciles == maxConcurrentReconciles { + return controller.New(name, mgr, options) + } + return nil, fmt.Errorf("max concurrent reconcilers expected %d but found %d", maxConcurrentReconciles, options.MaxConcurrentReconciles) + } + + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + builder := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). + Named("replicaset-4"). + Owns(&appsv1.ReplicaSet{}). 
+ WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}) + builder.newController = newController + + instance, err := builder.Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(instance).NotTo(BeNil()) + }) + + It("should override max concurrent reconcilers during creation of controller, when using", func() { + const maxConcurrentReconciles = 10 + newController := func(name string, mgr manager.Manager, options controller.Options) ( + controller.Controller, error) { + if options.MaxConcurrentReconciles == maxConcurrentReconciles { + return controller.New(name, mgr, options) + } + return nil, fmt.Errorf("max concurrent reconcilers expected %d but found %d", maxConcurrentReconciles, options.MaxConcurrentReconciles) + } + + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{ + Controller: config.Controller{ + GroupKindConcurrency: map[string]int{ + "ReplicaSet.apps": maxConcurrentReconciles, + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + + builder := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). + Named("replicaset-3"). + Owns(&appsv1.ReplicaSet{}) + builder.newController = newController + + instance, err := builder.Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(instance).NotTo(BeNil()) + }) + + It("should override rate limiter during creation of controller", func() { + rateLimiter := workqueue.DefaultTypedItemBasedRateLimiter[reconcile.Request]() + newController := func(name string, mgr manager.Manager, options controller.Options) (controller.Controller, error) { + if options.RateLimiter == rateLimiter { + return controller.New(name, mgr, options) + } + return nil, fmt.Errorf("rate limiter expected %T but found %T", rateLimiter, options.RateLimiter) + } + + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + builder := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). + Named("replicaset-2"). 
+ Owns(&appsv1.ReplicaSet{}). + WithOptions(controller.Options{RateLimiter: rateLimiter}) + builder.newController = newController + + instance, err := builder.Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(instance).NotTo(BeNil()) + }) + + It("should override logger during creation of controller", func() { + logger := &testLogger{} + newController := func(name string, mgr manager.Manager, options controller.Options) (controller.Controller, error) { + if options.LogConstructor(nil).GetSink() == logger { + return controller.New(name, mgr, options) + } + return nil, fmt.Errorf("logger expected %T but found %T", logger, options.LogConstructor) + } + + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + builder := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). + Named("replicaset-0"). + Owns(&appsv1.ReplicaSet{}). + WithLogConstructor(func(request *reconcile.Request) logr.Logger { + return logr.New(logger) + }) + builder.newController = newController + instance, err := builder.Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(instance).NotTo(BeNil()) + }) + + It("should not allow multiple reconcilers during creation of controller", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + builder := ControllerManagedBy(m). + For(&appsv1.ReplicaSet{}). + Named("replicaset-1"). + Owns(&appsv1.ReplicaSet{}). 
+ WithOptions(controller.Options{Reconciler: noop}) + instance, err := builder.Build(noop) + Expect(err).To(HaveOccurred()) + Expect(instance).To(BeNil()) + }) + + It("should allow multiple controllers for the same kind", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testDefaultValidatorGVK.GroupVersion()} + builder.Register(&TestDefaultValidator{}, &TestDefaultValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + Expect(err).NotTo(HaveOccurred()) + + By("creating the 1st controller") + ctrl1, err := ControllerManagedBy(m). + For(&TestDefaultValidator{}). + Owns(&appsv1.ReplicaSet{}). + Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(ctrl1).NotTo(BeNil()) + + By("creating the 2nd controller") + ctrl2, err := ControllerManagedBy(m). + For(&TestDefaultValidator{}). + Named("test-default-validator-1"). + Owns(&appsv1.ReplicaSet{}). + Build(noop) + Expect(err).NotTo(HaveOccurred()) + Expect(ctrl2).NotTo(BeNil()) + }) + }) + + Describe("Start with ControllerManagedBy", func() { + It("should Reconcile Owns objects", func(ctx SpecContext) { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + bldr := ControllerManagedBy(m). + For(&appsv1.Deployment{}). + Named("deployment-0"). + Owns(&appsv1.ReplicaSet{}) + + doReconcileTest(ctx, "3", m, false, bldr) + }) + + It("should Reconcile Owns objects for every owner", func(ctx SpecContext) { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + bldr := ControllerManagedBy(m). + For(&appsv1.Deployment{}). + Named("deployment-1"). 
+ Owns(&appsv1.ReplicaSet{}, MatchEveryOwner) + + doReconcileTest(ctx, "12", m, false, bldr) + }) + + It("should Reconcile Watches objects", func(ctx SpecContext) { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + bldr := ControllerManagedBy(m). + For(&appsv1.Deployment{}). + Watches( // Equivalent of Owns + &appsv1.ReplicaSet{}, + handler.EnqueueRequestForOwner(m.GetScheme(), m.GetRESTMapper(), &appsv1.Deployment{}, handler.OnlyControllerOwner()), + ) + + doReconcileTest(ctx, "4", m, true, bldr) + }) + + It("should Reconcile without For", func(ctx SpecContext) { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + bldr := ControllerManagedBy(m). + Named("Deployment"). + Named("deployment-2"). + Watches( // Equivalent of For + &appsv1.Deployment{}, &handler.EnqueueRequestForObject{}). + Watches( // Equivalent of Owns + &appsv1.ReplicaSet{}, + handler.EnqueueRequestForOwner(m.GetScheme(), m.GetRESTMapper(), &appsv1.Deployment{}, handler.OnlyControllerOwner()), + ) + + doReconcileTest(ctx, "9", m, true, bldr) + }) + }) + + Describe("Set custom predicates", func() { + It("should execute registered predicates only for assigned kind", func(ctx SpecContext) { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + var ( + deployPrctExecuted = false + replicaSetPrctExecuted = false + allPrctExecuted = int64(0) + ) + + deployPrct := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + defer GinkgoRecover() + // check that it was called only for deployment + Expect(e.Object).To(BeAssignableToTypeOf(&appsv1.Deployment{})) + deployPrctExecuted = true + return true + }, + } + + replicaSetPrct := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + defer GinkgoRecover() + // check that it was called only for replicaset + Expect(e.Object).To(BeAssignableToTypeOf(&appsv1.ReplicaSet{})) + replicaSetPrctExecuted = true + return true + }, + } + + 
allPrct := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + defer GinkgoRecover() + // check that it was called for all registered kinds + Expect(e.Object).Should(Or( + BeAssignableToTypeOf(&appsv1.Deployment{}), + BeAssignableToTypeOf(&appsv1.ReplicaSet{}), + )) + + atomic.AddInt64(&allPrctExecuted, 1) + return true + }, + } + + bldr := ControllerManagedBy(m). + For(&appsv1.Deployment{}, WithPredicates(deployPrct)). + Named("deployment-3"). + Owns(&appsv1.ReplicaSet{}, WithPredicates(replicaSetPrct)). + WithEventFilter(allPrct) + + doReconcileTest(ctx, "5", m, true, bldr) + + Expect(deployPrctExecuted).To(BeTrue(), "Deploy predicated should be called at least once") + Expect(replicaSetPrctExecuted).To(BeTrue(), "ReplicaSet predicated should be called at least once") + Expect(allPrctExecuted).To(BeNumerically(">=", 2), "Global Predicated should be called at least twice") + }) + }) + + Describe("watching with projections", func() { + var mgr manager.Manager + BeforeEach(func() { + // use a cache that intercepts requests for fully typed objects to + // ensure we use the projected versions + var err error + mgr, err = manager.New(cfg, manager.Options{NewCache: newNonTypedOnlyCache}) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should support multiple controllers watching the same metadata kind", func(ctx SpecContext) { + bldr1 := ControllerManagedBy(mgr).For(&appsv1.Deployment{}, OnlyMetadata).Named("deployment-4") + bldr2 := ControllerManagedBy(mgr).For(&appsv1.Deployment{}, OnlyMetadata).Named("deployment-5") + + doReconcileTest(ctx, "6", mgr, true, bldr1, bldr2) + }) + + It("should support watching For, Owns, and Watch as metadata", func(ctx SpecContext) { + statefulSetMaps := make(chan *metav1.PartialObjectMetadata) + + bldr := ControllerManagedBy(mgr). + For(&appsv1.Deployment{}, OnlyMetadata). + Named("deployment-6"). + Owns(&appsv1.ReplicaSet{}, OnlyMetadata). 
+ Watches(&appsv1.StatefulSet{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request { + defer GinkgoRecover() + + ometa := o.(*metav1.PartialObjectMetadata) + statefulSetMaps <- ometa + + // Validate that the GVK is not empty when dealing with PartialObjectMetadata objects. + Expect(o.GetObjectKind().GroupVersionKind()).To(Equal(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "StatefulSet", + })) + return nil + }), + OnlyMetadata) + + doReconcileTest(ctx, "8", mgr, true, bldr) + + By("Creating a new stateful set") + set := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test1", + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, + }, + } + err := mgr.GetClient().Create(ctx, set) + Expect(err).NotTo(HaveOccurred()) + + By("Checking that the mapping function has been called") + Eventually(func() bool { + metaSet := <-statefulSetMaps + Expect(metaSet.Name).To(Equal(set.Name)) + Expect(metaSet.Namespace).To(Equal(set.Namespace)) + Expect(metaSet.Labels).To(Equal(set.Labels)) + return true + }).Should(BeTrue()) + }) + }) +}) + +// newNonTypedOnlyCache returns a new cache that wraps the normal cache, +// returning an error if normal, typed objects have informers requested. +func newNonTypedOnlyCache(config *rest.Config, opts cache.Options) (cache.Cache, error) { + normalCache, err := cache.New(config, opts) + if err != nil { + return nil, err + } + return &nonTypedOnlyCache{ + Cache: normalCache, + }, nil +} + +// nonTypedOnlyCache is a cache.Cache that only provides metadata & +// unstructured informers. 
+type nonTypedOnlyCache struct {
+	cache.Cache
+}
+
+func (c *nonTypedOnlyCache) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) {
+	switch obj.(type) {
+	case (*metav1.PartialObjectMetadata):
+		return c.Cache.GetInformer(ctx, obj, opts...)
+	default:
+		return nil, fmt.Errorf("did not want to provide an informer for normal type %T", obj)
+	}
+}
+func (c *nonTypedOnlyCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...cache.InformerGetOption) (cache.Informer, error) {
+	return nil, fmt.Errorf("don't try to sidestep the restriction on informer types by calling GetInformerForKind")
+}
+
+// TODO(directxman12): this function has too many arguments, and the whole
+// "nameSuffix" thing is a bit of a hack. It should be cleaned up significantly by someone with a bit of time.
+func doReconcileTest(ctx context.Context, nameSuffix string, mgr manager.Manager, complete bool, blders ...*TypedBuilder[reconcile.Request]) {
+	deployName := "deploy-name-" + nameSuffix
+	rsName := "rs-name-" + nameSuffix
+
+	By("Creating the application")
+	ch := make(chan reconcile.Request)
+	fn := reconcile.Func(func(_ context.Context, req reconcile.Request) (reconcile.Result, error) {
+		defer GinkgoRecover()
+		if !strings.HasSuffix(req.Name, nameSuffix) {
+			// From different test, ignore this request. Etcd is shared across tests.
+			return reconcile.Result{}, nil
+		}
+		ch <- req
+		return reconcile.Result{}, nil
+	})
+
+	for _, blder := range blders {
+		if complete {
+			err := blder.Complete(fn)
+			Expect(err).NotTo(HaveOccurred())
+		} else {
+			var err error
+			var c controller.Controller
+			c, err = blder.Build(fn)
+			Expect(err).NotTo(HaveOccurred())
+			Expect(c).NotTo(BeNil())
+		}
+	}
+
+	By("Starting the application")
+	go func() {
+		defer GinkgoRecover()
+		Expect(mgr.Start(ctx)).NotTo(HaveOccurred())
+	}()
+
+	By("Creating a Deployment")
+	// Expect a Reconcile when the Deployment is created.
+ dep := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: deployName, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, + }, + } + err := mgr.GetClient().Create(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for the Deployment Reconcile") + Eventually(ch).Should(Receive(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "default", Name: deployName}}))) + + By("Creating a ReplicaSet") + // Expect a Reconcile when an Owned object is managedObjects. + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: rsName, + Labels: dep.Spec.Selector.MatchLabels, + OwnerReferences: []metav1.OwnerReference{ + { + Name: deployName, + Kind: "Deployment", + APIVersion: "apps/v1", + Controller: ptr.To(true), + UID: dep.UID, + }, + }, + }, + Spec: appsv1.ReplicaSetSpec{ + Selector: dep.Spec.Selector, + Template: dep.Spec.Template, + }, + } + err = mgr.GetClient().Create(ctx, rs) + Expect(err).NotTo(HaveOccurred()) + + By("Waiting for the ReplicaSet Reconcile") + Eventually(ch).Should(Receive(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "default", Name: deployName}}))) +} + +var _ runtime.Object = &fakeType{} + +type fakeType struct { + metav1.TypeMeta + metav1.ObjectMeta +} + +func (*fakeType) GetObjectKind() schema.ObjectKind { return nil } +func (*fakeType) DeepCopyObject() runtime.Object { return nil } diff --git a/pkg/builder/doc.go b/pkg/builder/doc.go index 09126576b2..e4df1b709f 100644 --- a/pkg/builder/doc.go +++ b/pkg/builder/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the 
License. */ -// Package builder provides wraps other controller-runtime libraries and exposes simple +// Package builder wraps other controller-runtime libraries and exposes simple // patterns for building common Controllers. // // Projects built with the builder package can trivially be rebased on top of the underlying diff --git a/pkg/builder/example_test.go b/pkg/builder/example_test.go index 735a04852b..e265853987 100644 --- a/pkg/builder/example_test.go +++ b/pkg/builder/example_test.go @@ -21,18 +21,71 @@ import ( "fmt" "os" - logf "sigs.k8s.io/controller-runtime/pkg/log" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) +func ExampleBuilder_metadata_only() { + logf.SetLogger(zap.New()) + + log := logf.Log.WithName("builder-examples") + + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) + if err != nil { + log.Error(err, "could not create manager") + os.Exit(1) + } + + cl := mgr.GetClient() + err = builder. + ControllerManagedBy(mgr). // Create the ControllerManagedBy + For(&appsv1.ReplicaSet{}). // ReplicaSet is the Application API + Owns(&corev1.Pod{}, builder.OnlyMetadata). 
// ReplicaSet owns Pods created by it, and caches them as metadata only + Complete(reconcile.Func(func(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + // Read the ReplicaSet + rs := &appsv1.ReplicaSet{} + err := cl.Get(ctx, req.NamespacedName, rs) + if err != nil { + return reconcile.Result{}, client.IgnoreNotFound(err) + } + + // List the Pods matching the PodTemplate Labels, but only their metadata + var podsMeta metav1.PartialObjectMetadataList + err = cl.List(ctx, &podsMeta, client.InNamespace(req.Namespace), client.MatchingLabels(rs.Spec.Template.Labels)) + if err != nil { + return reconcile.Result{}, client.IgnoreNotFound(err) + } + + // Update the ReplicaSet + rs.Labels["pod-count"] = fmt.Sprintf("%v", len(podsMeta.Items)) + err = cl.Update(ctx, rs) + if err != nil { + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil + })) + if err != nil { + log.Error(err, "could not create controller") + os.Exit(1) + } + + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + log.Error(err, "could not start manager") + os.Exit(1) + } +} + // This example creates a simple application ControllerManagedBy that is configured for ReplicaSets and Pods. // // * Create a new application for ReplicaSets that manages Pods owned by the ReplicaSet and calls into @@ -40,7 +93,9 @@ import ( // // * Start the application. func ExampleBuilder() { - var log = logf.Log.WithName("builder-examples") + logf.SetLogger(zap.New()) + + log := logf.Log.WithName("builder-examples") mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) if err != nil { @@ -52,7 +107,9 @@ func ExampleBuilder() { ControllerManagedBy(mgr). // Create the ControllerManagedBy For(&appsv1.ReplicaSet{}). // ReplicaSet is the Application API Owns(&corev1.Pod{}). 
// ReplicaSet owns Pods created by it - Complete(&ReplicaSetReconciler{}) + Complete(&ReplicaSetReconciler{ + Client: mgr.GetClient(), + }) if err != nil { log.Error(err, "could not create controller") os.Exit(1) @@ -75,34 +132,28 @@ type ReplicaSetReconciler struct { // // * Read the ReplicaSet // * Read the Pods -// * Set a Label on the ReplicaSet with the Pod count -func (a *ReplicaSetReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) { +// * Set a Label on the ReplicaSet with the Pod count. +func (a *ReplicaSetReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { // Read the ReplicaSet rs := &appsv1.ReplicaSet{} - err := a.Get(context.TODO(), req.NamespacedName, rs) + err := a.Get(ctx, req.NamespacedName, rs) if err != nil { return reconcile.Result{}, err } // List the Pods matching the PodTemplate Labels pods := &corev1.PodList{} - err = a.List(context.TODO(), pods, client.InNamespace(req.Namespace), - client.MatchingLabels(rs.Spec.Template.Labels)) + err = a.List(ctx, pods, client.InNamespace(req.Namespace), client.MatchingLabels(rs.Spec.Template.Labels)) if err != nil { return reconcile.Result{}, err } // Update the ReplicaSet rs.Labels["pod-count"] = fmt.Sprintf("%v", len(pods.Items)) - err = a.Update(context.TODO(), rs) + err = a.Update(ctx, rs) if err != nil { return reconcile.Result{}, err } return reconcile.Result{}, nil } - -func (a *ReplicaSetReconciler) InjectClient(c client.Client) error { - a.Client = c - return nil -} diff --git a/pkg/builder/example_webhook_test.go b/pkg/builder/example_webhook_test.go new file mode 100644 index 0000000000..c26eba8a13 --- /dev/null +++ b/pkg/builder/example_webhook_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder_test + +import ( + "os" + + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client/config" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + + examplegroup "sigs.k8s.io/controller-runtime/examples/crd/pkg" +) + +// This example use webhook builder to create a simple webhook that is managed +// by a manager for CRD ChaosPod. And then start the manager. +func ExampleWebhookBuilder() { + var log = logf.Log.WithName("webhookbuilder-example") + + mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) + if err != nil { + log.Error(err, "could not create manager") + os.Exit(1) + } + + err = builder. + WebhookManagedBy(mgr). // Create the WebhookManagedBy + For(&examplegroup.ChaosPod{}). // ChaosPod is a CRD. + Complete() + if err != nil { + log.Error(err, "could not create webhook") + os.Exit(1) + } + + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + log.Error(err, "could not start manager") + os.Exit(1) + } +} diff --git a/pkg/builder/options.go b/pkg/builder/options.go new file mode 100644 index 0000000000..b907b5d020 --- /dev/null +++ b/pkg/builder/options.go @@ -0,0 +1,156 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+)
+
+// {{{ "Functional" Option Interfaces
+
+// ForOption is some configuration that modifies options for a For request.
+type ForOption interface {
+	// ApplyToFor applies this configuration to the given for input.
+	ApplyToFor(*ForInput)
+}
+
+// OwnsOption is some configuration that modifies options for an owns request.
+type OwnsOption interface {
+	// ApplyToOwns applies this configuration to the given owns input.
+	ApplyToOwns(*OwnsInput)
+}
+
+// WatchesOption is some configuration that modifies options for a watches request.
+type WatchesOption interface {
+	// ApplyToWatches applies this configuration to the given watches options.
+	ApplyToWatches(untypedWatchesInput)
+}
+
+// }}}
+
+// {{{ Multi-Type Options
+
+// WithPredicates sets the given predicates list.
+func WithPredicates(predicates ...predicate.Predicate) Predicates {
+	return Predicates{
+		predicates: predicates,
+	}
+}
+
+// Predicates filters events before enqueuing the keys.
+type Predicates struct {
+	predicates []predicate.Predicate
+}
+
+// ApplyToFor applies this configuration to the given ForInput options.
+func (w Predicates) ApplyToFor(opts *ForInput) {
+	opts.predicates = w.predicates
+}
+
+// ApplyToOwns applies this configuration to the given OwnsInput options.
+func (w Predicates) ApplyToOwns(opts *OwnsInput) {
+	opts.predicates = w.predicates
+}
+
+// ApplyToWatches applies this configuration to the given WatchesInput options.
+func (w Predicates) ApplyToWatches(opts untypedWatchesInput) {
+	opts.setPredicates(w.predicates)
+}
+
+// Compile-time assertions that Predicates satisfies all three option interfaces,
+// so it can be passed to For, Owns and Watches alike.
+var _ ForOption = &Predicates{}
+var _ OwnsOption = &Predicates{}
+var _ WatchesOption = &Predicates{}
+
+// }}}
+
+// {{{ For & Owns Dual-Type options
+
+// projectAs configures the projection on the input.
+// Currently only OnlyMetadata is supported. We might want to expand
+// this to arbitrary non-special local projections in the future.
+type projectAs objectProjection
+
+// ApplyToFor applies this configuration to the given ForInput options.
+func (p projectAs) ApplyToFor(opts *ForInput) {
+	opts.objectProjection = objectProjection(p)
+}
+
+// ApplyToOwns applies this configuration to the given OwnsInput options.
+func (p projectAs) ApplyToOwns(opts *OwnsInput) {
+	opts.objectProjection = objectProjection(p)
+}
+
+// ApplyToWatches applies this configuration to the given WatchesInput options.
+func (p projectAs) ApplyToWatches(opts untypedWatchesInput) {
+	opts.setObjectProjection(objectProjection(p))
+}
+
+var (
+	// OnlyMetadata tells the controller to *only* cache metadata, and to watch
+	// the API server in metadata-only form. This is useful when watching
+	// lots of objects, really big objects, or objects for which you only know
+	// the GVK, but not the structure. You'll need to pass
+	// metav1.PartialObjectMetadata to the client when fetching objects in your
+	// reconciler, otherwise you'll end up with a duplicate structured or
+	// unstructured cache.
+	//
+	// When watching a resource with OnlyMetadata, for example the v1.Pod, you
+	// should not Get and List using the v1.Pod type. Instead, you should use
+	// the special metav1.PartialObjectMetadata type.
+	//
+	// ❌ Incorrect:
+	//
+	//   pod := &v1.Pod{}
+	//   mgr.GetClient().Get(ctx, nsAndName, pod)
+	//
+	// ✅ Correct:
+	//
+	//   pod := &metav1.PartialObjectMetadata{}
+	//   pod.SetGroupVersionKind(schema.GroupVersionKind{
+	//       Group:   "",
+	//       Version: "v1",
+	//       Kind:    "Pod",
+	//   })
+	//   mgr.GetClient().Get(ctx, nsAndName, pod)
+	//
+	// In the first case, controller-runtime will create another cache for the
+	// concrete type on top of the metadata cache; this increases memory
+	// consumption and leads to race conditions as caches are not in sync.
+	OnlyMetadata = projectAs(projectAsMetadata)
+
+	// Compile-time assertions that OnlyMetadata is usable in For, Owns and Watches.
+	_ ForOption     = OnlyMetadata
+	_ OwnsOption    = OnlyMetadata
+	_ WatchesOption = OnlyMetadata
+)
+
+// }}}
+
+// MatchEveryOwner determines whether the watch should be filtered based on
+// controller ownership. As in, when the OwnerReference.Controller field is set.
+//
+// If passed as an option,
+// the handler receives notification for every owner of the object with the given type.
+// If unset (default), the handler receives notification only for the first
+// OwnerReference with `Controller: true`.
+var MatchEveryOwner = &matchEveryOwner{}
+
+// matchEveryOwner is the unexported option type behind the MatchEveryOwner singleton.
+type matchEveryOwner struct{}
+
+// ApplyToOwns applies this configuration to the given OwnsInput options.
+func (o matchEveryOwner) ApplyToOwns(opts *OwnsInput) {
+	opts.matchEveryOwner = true
+}
diff --git a/pkg/builder/webhook.go b/pkg/builder/webhook.go
new file mode 100644
index 0000000000..6f4726d274
--- /dev/null
+++ b/pkg/builder/webhook.go
@@ -0,0 +1,342 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+
+	"github.com/go-logr/logr"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/rest"
+	"k8s.io/klog/v2"
+
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/conversion"
+)
+
+// WebhookBuilder builds a Webhook.
+type WebhookBuilder struct {
+	apiType                   runtime.Object
+	customDefaulter           admission.CustomDefaulter
+	customDefaulterOpts       []admission.DefaulterOption
+	customValidator           admission.CustomValidator
+	customPath                string
+	customValidatorCustomPath string
+	customDefaulterCustomPath string
+	gvk                       schema.GroupVersionKind
+	mgr                       manager.Manager
+	config                    *rest.Config
+	recoverPanic              *bool
+	logConstructor            func(base logr.Logger, req *admission.Request) logr.Logger
+	contextFunc               func(context.Context, *http.Request) context.Context
+	// err records a builder-misuse error (e.g. For called twice) and is
+	// surfaced from Complete via registerWebhooks.
+	err error
+}
+
+// WebhookManagedBy returns a new webhook builder.
+func WebhookManagedBy(m manager.Manager) *WebhookBuilder {
+	return &WebhookBuilder{mgr: m}
+}
+
+// TODO(droot): update the GoDoc for conversion.
+
+// For takes a runtime.Object which should be a CR.
+// If the given object implements the admission.Defaulter interface, a MutatingWebhook will be wired for this type.
+// If the given object implements the admission.Validator interface, a ValidatingWebhook will be wired for this type.
+func (blder *WebhookBuilder) For(apiType runtime.Object) *WebhookBuilder {
+	if blder.apiType != nil {
+		blder.err = errors.New("For(...) should only be called once, could not assign multiple objects for webhook registration")
+	}
+	blder.apiType = apiType
+	return blder
+}
+
+// WithDefaulter takes an admission.CustomDefaulter interface, a MutatingWebhook with the provided opts (admission.DefaulterOption)
+// will be wired for this type.
+func (blder *WebhookBuilder) WithDefaulter(defaulter admission.CustomDefaulter, opts ...admission.DefaulterOption) *WebhookBuilder {
+	blder.customDefaulter = defaulter
+	blder.customDefaulterOpts = opts
+	return blder
+}
+
+// WithValidator takes a admission.CustomValidator interface, a ValidatingWebhook will be wired for this type.
+func (blder *WebhookBuilder) WithValidator(validator admission.CustomValidator) *WebhookBuilder {
+	blder.customValidator = validator
+	return blder
+}
+
+// WithLogConstructor overrides the webhook's LogConstructor.
+func (blder *WebhookBuilder) WithLogConstructor(logConstructor func(base logr.Logger, req *admission.Request) logr.Logger) *WebhookBuilder {
+	blder.logConstructor = logConstructor
+	return blder
+}
+
+// WithContextFunc overrides the webhook's WithContextFunc.
+func (blder *WebhookBuilder) WithContextFunc(contextFunc func(context.Context, *http.Request) context.Context) *WebhookBuilder {
+	blder.contextFunc = contextFunc
+	return blder
+}
+
+// RecoverPanic indicates whether panics caused by the webhook should be recovered.
+// Defaults to true.
+func (blder *WebhookBuilder) RecoverPanic(recoverPanic bool) *WebhookBuilder {
+	blder.recoverPanic = &recoverPanic
+	return blder
+}
+
+// WithCustomPath overrides the webhook's default path by the customPath
+//
+// Deprecated: WithCustomPath should not be used anymore.
+// Please use WithValidatorCustomPath or WithDefaulterCustomPath instead.
+func (blder *WebhookBuilder) WithCustomPath(customPath string) *WebhookBuilder {
+	blder.customPath = customPath
+	return blder
+}
+
+// WithValidatorCustomPath overrides the path of the Validator.
+func (blder *WebhookBuilder) WithValidatorCustomPath(customPath string) *WebhookBuilder {
+	blder.customValidatorCustomPath = customPath
+	return blder
+}
+
+// WithDefaulterCustomPath overrides the path of the Defaulter.
+func (blder *WebhookBuilder) WithDefaulterCustomPath(customPath string) *WebhookBuilder {
+	blder.customDefaulterCustomPath = customPath
+	return blder
+}
+
+// Complete builds the webhook.
+func (blder *WebhookBuilder) Complete() error {
+	// Set the Config
+	blder.loadRestConfig()
+
+	// Configure the default LogConstructor
+	blder.setLogConstructor()
+
+	// Set the Webhook if needed
+	return blder.registerWebhooks()
+}
+
+// loadRestConfig populates the builder's rest.Config from the manager
+// when one was not set explicitly.
+func (blder *WebhookBuilder) loadRestConfig() {
+	if blder.config == nil {
+		blder.config = blder.mgr.GetConfig()
+	}
+}
+
+// setLogConstructor installs the default LogConstructor (webhook GVK values
+// plus, when a request is present, the object key, resource, user and request
+// UID) unless one was supplied via WithLogConstructor.
+func (blder *WebhookBuilder) setLogConstructor() {
+	if blder.logConstructor == nil {
+		blder.logConstructor = func(base logr.Logger, req *admission.Request) logr.Logger {
+			log := base.WithValues(
+				"webhookGroup", blder.gvk.Group,
+				"webhookKind", blder.gvk.Kind,
+			)
+			if req != nil {
+				return log.WithValues(
+					blder.gvk.Kind, klog.KRef(req.Namespace, req.Name),
+					"namespace", req.Namespace, "name", req.Name,
+					"resource", req.Resource, "user", req.UserInfo.Username,
+					"requestID", req.UID,
+				)
+			}
+			return log
+		}
+	}
+}
+
+// isThereCustomPathConflict reports whether the deprecated WithCustomPath was
+// combined either with both a defaulter and a validator, or with the newer
+// per-webhook custom-path setters — both combinations are ambiguous.
+func (blder *WebhookBuilder) isThereCustomPathConflict() bool {
+	return (blder.customPath != "" && blder.customDefaulter != nil && blder.customValidator != nil) || (blder.customPath != "" && blder.customDefaulterCustomPath != "") || (blder.customPath != "" && blder.customValidatorCustomPath != "")
+}
+
+// registerWebhooks resolves the GVK of the configured type and then registers
+// the defaulting, validating and conversion webhooks as needed.
+func (blder *WebhookBuilder) registerWebhooks() error {
+	typ, err := blder.getType()
+	if err != nil {
+		return err
+	}
+
+	blder.gvk, err = apiutil.GVKForObject(typ,
blder.mgr.GetScheme())
+	if err != nil {
+		return err
+	}
+
+	if blder.isThereCustomPathConflict() {
+		return errors.New("only one of CustomDefaulter or CustomValidator should be set when using WithCustomPath. Otherwise, WithDefaulterCustomPath() and WithValidatorCustomPath() should be used")
+	}
+	if blder.customPath != "" {
+		// isThereCustomPathConflict() already checks for potential conflicts.
+		// Since we are sure that only one of customDefaulter or customValidator will be used,
+		// we can set both customDefaulterCustomPath and validatingCustomPath.
+		blder.customDefaulterCustomPath = blder.customPath
+		blder.customValidatorCustomPath = blder.customPath
+	}
+
+	// Register webhook(s) for type
+	err = blder.registerDefaultingWebhook()
+	if err != nil {
+		return err
+	}
+
+	err = blder.registerValidatingWebhook()
+	if err != nil {
+		return err
+	}
+
+	err = blder.registerConversionWebhook()
+	if err != nil {
+		return err
+	}
+	return blder.err
+}
+
+// registerDefaultingWebhook registers a defaulting webhook if necessary.
+func (blder *WebhookBuilder) registerDefaultingWebhook() error {
+	mwh := blder.getDefaultingWebhook()
+	if mwh != nil {
+		mwh.LogConstructor = blder.logConstructor
+		mwh.WithContextFunc = blder.contextFunc
+		path := generateMutatePath(blder.gvk)
+		if blder.customDefaulterCustomPath != "" {
+			generatedCustomPath, err := generateCustomPath(blder.customDefaulterCustomPath)
+			if err != nil {
+				return err
+			}
+			path = generatedCustomPath
+		}
+
+		// Checking if the path is already registered.
+		// If so, just skip it.
+		if !blder.isAlreadyHandled(path) {
+			log.Info("Registering a mutating webhook",
+				"GVK", blder.gvk,
+				"path", path)
+			blder.mgr.GetWebhookServer().Register(path, mwh)
+		}
+	}
+
+	return nil
+}
+
+// getDefaultingWebhook wraps the configured CustomDefaulter in an
+// admission.Webhook (applying the recover-panic setting), or returns nil
+// when no defaulter was configured.
+func (blder *WebhookBuilder) getDefaultingWebhook() *admission.Webhook {
+	if defaulter := blder.customDefaulter; defaulter != nil {
+		w := admission.WithCustomDefaulter(blder.mgr.GetScheme(), blder.apiType, defaulter, blder.customDefaulterOpts...)
+		if blder.recoverPanic != nil {
+			w = w.WithRecoverPanic(*blder.recoverPanic)
+		}
+		return w
+	}
+	return nil
+}
+
+// registerValidatingWebhook registers a validating webhook if necessary.
+func (blder *WebhookBuilder) registerValidatingWebhook() error {
+	vwh := blder.getValidatingWebhook()
+	if vwh != nil {
+		vwh.LogConstructor = blder.logConstructor
+		vwh.WithContextFunc = blder.contextFunc
+		path := generateValidatePath(blder.gvk)
+		if blder.customValidatorCustomPath != "" {
+			generatedCustomPath, err := generateCustomPath(blder.customValidatorCustomPath)
+			if err != nil {
+				return err
+			}
+			path = generatedCustomPath
+		}
+
+		// Checking if the path is already registered.
+		// If so, just skip it.
+		if !blder.isAlreadyHandled(path) {
+			log.Info("Registering a validating webhook",
+				"GVK", blder.gvk,
+				"path", path)
+			blder.mgr.GetWebhookServer().Register(path, vwh)
+		}
+	}
+
+	return nil
+}
+
+// getValidatingWebhook wraps the configured CustomValidator in an
+// admission.Webhook (applying the recover-panic setting), or returns nil
+// when no validator was configured.
+func (blder *WebhookBuilder) getValidatingWebhook() *admission.Webhook {
+	if validator := blder.customValidator; validator != nil {
+		w := admission.WithCustomValidator(blder.mgr.GetScheme(), blder.apiType, validator)
+		if blder.recoverPanic != nil {
+			w = w.WithRecoverPanic(*blder.recoverPanic)
+		}
+		return w
+	}
+	return nil
+}
+
+// registerConversionWebhook registers the shared "/convert" handler when the
+// configured type is convertible (i.e. implements the conversion hub/spoke
+// contract checked by conversion.IsConvertible).
+func (blder *WebhookBuilder) registerConversionWebhook() error {
+	ok, err := conversion.IsConvertible(blder.mgr.GetScheme(), blder.apiType)
+	if err != nil {
+		log.Error(err, "conversion check failed", "GVK", blder.gvk)
+		return err
+	}
+	if ok {
+		if !blder.isAlreadyHandled("/convert") {
+			blder.mgr.GetWebhookServer().Register("/convert", conversion.NewWebhookHandler(blder.mgr.GetScheme()))
+		}
+		log.Info("Conversion webhook enabled", "GVK", blder.gvk)
+	}
+
+	return nil
+}
+
+// getType returns the object passed to For(), or an error when For() was
+// never called.
+func (blder *WebhookBuilder) getType() (runtime.Object, error) {
+	if blder.apiType != nil {
+		return blder.apiType, nil
+	}
+	return nil, errors.New("For() must be called with a valid object")
+}
+
+// isAlreadyHandled reports whether a handler is already registered for path
+// on the webhook server's mux, so duplicate registrations can be skipped.
+func (blder *WebhookBuilder) isAlreadyHandled(path string) bool {
+	if blder.mgr.GetWebhookServer().WebhookMux() == nil {
+		return false
+	}
+	h, p := blder.mgr.GetWebhookServer().WebhookMux().Handler(&http.Request{URL: &url.URL{Path: path}})
+	if p == path && h != nil {
+		return true
+	}
+	return false
+}
+
+// generateMutatePath builds the default mutating-webhook path for a GVK:
+// /mutate-<group with dots dashed>-<version>-<lowercase kind>.
+func generateMutatePath(gvk schema.GroupVersionKind) string {
+	return "/mutate-" + strings.ReplaceAll(gvk.Group, ".", "-") + "-" +
+		gvk.Version + "-" + strings.ToLower(gvk.Kind)
+}
+
+// generateValidatePath builds the default validating-webhook path for a GVK:
+// /validate-<group with dots dashed>-<version>-<lowercase kind>.
+func generateValidatePath(gvk schema.GroupVersionKind) string {
+	return "/validate-" + strings.ReplaceAll(gvk.Group, ".", "-") + "-" +
+		gvk.Version + "-" + strings.ToLower(gvk.Kind)
+}
+
+const webhookPathStringValidation = `^((/[a-zA-Z0-9-_]+)+|/)$`
+
+var validWebhookPathRegex = regexp.MustCompile(webhookPathStringValidation)
+
+// generateCustomPath validates a user-supplied webhook path against
+// webhookPathStringValidation and returns it unchanged when it matches.
+func generateCustomPath(customPath string) (string, error) {
+	if !validWebhookPathRegex.MatchString(customPath) {
+		return "", errors.New("customPath \"" + customPath + "\" does not match this regex: " + webhookPathStringValidation)
+	}
+	return customPath, nil
+}
diff --git a/pkg/builder/webhook_test.go b/pkg/builder/webhook_test.go
new file mode 100644
index 0000000000..72538ef7bf
--- /dev/null
+++ b/pkg/builder/webhook_test.go
@@ -0,0 +1,1216 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"strings"
+
+	"github.com/go-logr/logr"
+	. "github.com/onsi/ginkgo/v2"
+	.
"github.com/onsi/gomega" + "github.com/onsi/gomega/gbytes" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/scheme" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +const ( + admissionReviewGV = `{ + "kind":"AdmissionReview", + "apiVersion":"admission.k8s.io/` + + svcBaseAddr = "http://svc-name.svc-ns.svc" + + customPath = "/custom-path" + + userAgentHeader = "User-Agent" + userAgentCtxKey agentCtxKey = "UserAgent" + userAgentValue = "test" +) + +type agentCtxKey string + +var _ = Describe("webhook", func() { + Describe("New", func() { + Context("v1 AdmissionReview", func() { + runTests("v1") + }) + Context("v1beta1 AdmissionReview", func() { + runTests("v1beta1") + }) + }) +}) + +func runTests(admissionReviewVersion string) { + var ( + stop chan struct{} + logBuffer *gbytes.Buffer + testingLogger logr.Logger + ) + + BeforeEach(func() { + stop = make(chan struct{}) + logBuffer = gbytes.NewBuffer() + testingLogger = zap.New(zap.JSONEncoder(), zap.WriteTo(io.MultiWriter(logBuffer, GinkgoWriter))) + }) + + AfterEach(func() { + close(stop) + }) + + It("should scaffold a custom defaulting webhook", func(specCtx SpecContext) { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testDefaulterGVK.GroupVersion()} + builder.Register(&TestDefaulter{}, &TestDefaulterList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestDefaulter{}). + WithDefaulter(&TestCustomDefaulter{}). 
+ WithLogConstructor(func(base logr.Logger, req *admission.Request) logr.Logger { + return admission.DefaultLogConstructor(testingLogger, req) + }). + Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(admissionReviewGV + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"foo.test.org", + "version":"v1", + "kind":"TestDefaulter" + }, + "resource":{ + "group":"foo.test.org", + "version":"v1", + "resource":"testdefaulter" + }, + "namespace":"default", + "name":"foo", + "operation":"CREATE", + "object":{ + "replica":1 + }, + "oldObject":null + } +}`) + + ctx, cancel := context.WithCancel(specCtx) + cancel() + err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a mutating webhook path") + path := generateMutatePath(testDefaulterGVK) + req := httptest.NewRequest("POST", svcBaseAddr+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable fields") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":true`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"patch":`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":200`)) + EventuallyWithOffset(1, logBuffer).Should(gbytes.Say(`"msg":"Defaulting object","object":{"name":"foo","namespace":"default"},"namespace":"default","name":"foo","resource":{"group":"foo.test.org","version":"v1","resource":"testdefaulter"},"user":"","requestID":"07e52e8d-4513-11e9-a716-42010a800270"`)) + + By("sending a request to a validating webhook path that doesn't exist") + path = generateValidatePath(testDefaulterGVK) + _, err = reader.Seek(0, 0) + ExpectWithOffset(1, 
err).NotTo(HaveOccurred()) + req = httptest.NewRequest("POST", svcBaseAddr+path, reader) + req.Header.Add("Content-Type", "application/json") + w = httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusNotFound)) + }) + + It("should scaffold a custom defaulting webhook with a custom path", func(specCtx SpecContext) { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testDefaulterGVK.GroupVersion()} + builder.Register(&TestDefaulter{}, &TestDefaulterList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + customPath := "/custom-defaulting-path" + err = WebhookManagedBy(m). + For(&TestDefaulter{}). + WithDefaulter(&TestCustomDefaulter{}). + WithLogConstructor(func(base logr.Logger, req *admission.Request) logr.Logger { + return admission.DefaultLogConstructor(testingLogger, req) + }). + WithDefaulterCustomPath(customPath). 
+ Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(admissionReviewGV + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"foo.test.org", + "version":"v1", + "kind":"TestDefaulter" + }, + "resource":{ + "group":"foo.test.org", + "version":"v1", + "resource":"testdefaulter" + }, + "namespace":"default", + "name":"foo", + "operation":"CREATE", + "object":{ + "replica":1 + }, + "oldObject":null + } +}`) + + ctx, cancel := context.WithCancel(specCtx) + cancel() + err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a mutating webhook path that have been overriten by a custom path") + path, err := generateCustomPath(customPath) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req := httptest.NewRequest("POST", svcBaseAddr+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable fields") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":true`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"patch":`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":200`)) + EventuallyWithOffset(1, logBuffer).Should(gbytes.Say(`"msg":"Defaulting object","object":{"name":"foo","namespace":"default"},"namespace":"default","name":"foo","resource":{"group":"foo.test.org","version":"v1","resource":"testdefaulter"},"user":"","requestID":"07e52e8d-4513-11e9-a716-42010a800270"`)) + + By("sending a request to a mutating webhook path") + path = generateMutatePath(testDefaulterGVK) + _, err = reader.Seek(0, 0) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req = httptest.NewRequest("POST", svcBaseAddr+path, reader) + 
req.Header.Add("Content-Type", "application/json") + w = httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusNotFound)) + }) + + It("should scaffold a custom defaulting webhook which recovers from panics", func(specCtx SpecContext) { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testDefaulterGVK.GroupVersion()} + builder.Register(&TestDefaulter{}, &TestDefaulterList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestDefaulter{}). + WithDefaulter(&TestCustomDefaulter{}). + RecoverPanic(true). + // RecoverPanic defaults to true. + Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(admissionReviewGV + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"", + "version":"v1", + "kind":"TestDefaulter" + }, + "resource":{ + "group":"", + "version":"v1", + "resource":"testdefaulter" + }, + "namespace":"default", + "operation":"CREATE", + "object":{ + "replica":1, + "panic":true + }, + "oldObject":null + } +}`) + + ctx, cancel := context.WithCancel(specCtx) + cancel() + err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a mutating webhook path") + path := generateMutatePath(testDefaulterGVK) + req := httptest.NewRequest("POST", svcBaseAddr+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable fields") + 
ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":false`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":500`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"message":"panic: fake panic test [recovered]`)) + }) + + It("should scaffold a custom validating webhook", func(specCtx SpecContext) { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testValidatorGVK.GroupVersion()} + builder.Register(&TestValidator{}, &TestValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestValidator{}). + WithValidator(&TestCustomValidator{}). + WithLogConstructor(func(base logr.Logger, req *admission.Request) logr.Logger { + return admission.DefaultLogConstructor(testingLogger, req) + }). + WithContextFunc(func(ctx context.Context, request *http.Request) context.Context { + return context.WithValue(ctx, userAgentCtxKey, request.Header.Get(userAgentHeader)) + }). 
+ Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(admissionReviewGV + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"foo.test.org", + "version":"v1", + "kind":"TestValidator" + }, + "resource":{ + "group":"foo.test.org", + "version":"v1", + "resource":"testvalidator" + }, + "namespace":"default", + "name":"foo", + "operation":"UPDATE", + "object":{ + "replica":1 + }, + "oldObject":{ + "replica":2 + } + } +}`) + readerWithCxt := strings.NewReader(admissionReviewGV + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"foo.test.org", + "version":"v1", + "kind":"TestValidator" + }, + "resource":{ + "group":"foo.test.org", + "version":"v1", + "resource":"testvalidator" + }, + "namespace":"default", + "name":"foo", + "operation":"UPDATE", + "object":{ + "replica":1 + }, + "oldObject":{ + "replica":1 + } + } +}`) + + ctx, cancel := context.WithCancel(specCtx) + cancel() + err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a mutating webhook path that doesn't exist") + path := generateMutatePath(testValidatorGVK) + req := httptest.NewRequest("POST", svcBaseAddr+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusNotFound)) + + By("sending a request to a validating webhook path") + path = generateValidatePath(testValidatorGVK) + _, err = reader.Seek(0, 0) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req = httptest.NewRequest("POST", svcBaseAddr+path, reader) + req.Header.Add("Content-Type", "application/json") + w = httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, 
w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable field") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":false`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":403`)) + EventuallyWithOffset(1, logBuffer).Should(gbytes.Say(`"msg":"Validating object","object":{"name":"foo","namespace":"default"},"namespace":"default","name":"foo","resource":{"group":"foo.test.org","version":"v1","resource":"testvalidator"},"user":"","requestID":"07e52e8d-4513-11e9-a716-42010a800270"`)) + + By("sending a request to a validating webhook with context header validation") + path = generateValidatePath(testValidatorGVK) + _, err = readerWithCxt.Seek(0, 0) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req = httptest.NewRequest("POST", svcBaseAddr+path, readerWithCxt) + req.Header.Add("Content-Type", "application/json") + req.Header.Add(userAgentHeader, userAgentValue) + w = httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable field") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":true`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":200`)) + }) + + It("should scaffold a custom validating webhook with a custom path", func(specCtx SpecContext) { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testValidatorGVK.GroupVersion()} + builder.Register(&TestValidator{}, &TestValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + customPath := "/custom-validating-path" + err = WebhookManagedBy(m). + For(&TestValidator{}). + WithValidator(&TestCustomValidator{}). 
+ WithLogConstructor(func(base logr.Logger, req *admission.Request) logr.Logger { + return admission.DefaultLogConstructor(testingLogger, req) + }). + WithValidatorCustomPath(customPath). + Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(admissionReviewGV + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"foo.test.org", + "version":"v1", + "kind":"TestValidator" + }, + "resource":{ + "group":"foo.test.org", + "version":"v1", + "resource":"testvalidator" + }, + "namespace":"default", + "name":"foo", + "operation":"UPDATE", + "object":{ + "replica":1 + }, + "oldObject":{ + "replica":2 + } + } +}`) + + ctx, cancel := context.WithCancel(specCtx) + cancel() + err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a valiting webhook path that have been overriten by a custom path") + path, err := generateCustomPath(customPath) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + _, err = reader.Seek(0, 0) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req := httptest.NewRequest("POST", svcBaseAddr+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable field") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":false`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":403`)) + EventuallyWithOffset(1, logBuffer).Should(gbytes.Say(`"msg":"Validating object","object":{"name":"foo","namespace":"default"},"namespace":"default","name":"foo","resource":{"group":"foo.test.org","version":"v1","resource":"testvalidator"},"user":"","requestID":"07e52e8d-4513-11e9-a716-42010a800270"`)) + + By("sending a request to a 
validating webhook path") + path = generateValidatePath(testValidatorGVK) + req = httptest.NewRequest("POST", svcBaseAddr+path, reader) + req.Header.Add("Content-Type", "application/json") + w = httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusNotFound)) + }) + + It("should scaffold a custom validating webhook which recovers from panics", func(specCtx SpecContext) { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testValidatorGVK.GroupVersion()} + builder.Register(&TestValidator{}, &TestValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestValidator{}). + WithValidator(&TestCustomValidator{}). + RecoverPanic(true). + Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(admissionReviewGV + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"", + "version":"v1", + "kind":"TestValidator" + }, + "resource":{ + "group":"", + "version":"v1", + "resource":"testvalidator" + }, + "namespace":"default", + "operation":"CREATE", + "object":{ + "replica":2, + "panic":true + } + } +}`) + + ctx, cancel := context.WithCancel(specCtx) + cancel() + err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a validating webhook path") + path := generateValidatePath(testValidatorGVK) + _, err = reader.Seek(0, 0) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req := httptest.NewRequest("POST", svcBaseAddr+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + 
svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable field") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":false`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":500`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"message":"panic: fake panic test [recovered]`)) + }) + + It("should scaffold a custom validating webhook to validate deletes", func(specCtx SpecContext) { + By("creating a controller manager") + ctx, cancel := context.WithCancel(specCtx) + + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testValidatorGVK.GroupVersion()} + builder.Register(&TestValidator{}, &TestValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestValidator{}). + WithValidator(&TestCustomValidator{}). 
+ Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(admissionReviewGV + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"", + "version":"v1", + "kind":"TestValidator" + }, + "resource":{ + "group":"", + "version":"v1", + "resource":"testvalidator" + }, + "namespace":"default", + "operation":"DELETE", + "object":null, + "oldObject":{ + "replica":1 + } + } +}`) + + cancel() + err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a validating webhook path to check for failed delete") + path := generateValidatePath(testValidatorGVK) + req := httptest.NewRequest("POST", svcBaseAddr+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable field") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":false`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":403`)) + + reader = strings.NewReader(admissionReviewGV + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"", + "version":"v1", + "kind":"TestValidator" + }, + "resource":{ + "group":"", + "version":"v1", + "resource":"testvalidator" + }, + "namespace":"default", + "operation":"DELETE", + "object":null, + "oldObject":{ + "replica":0 + } + } +}`) + By("sending a request to a validating webhook path with correct request") + path = generateValidatePath(testValidatorGVK) + req = httptest.NewRequest("POST", svcBaseAddr+path, reader) + req.Header.Add("Content-Type", "application/json") + w = httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, 
w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable field") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":true`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":200`)) + }) + + It("should send an error when trying to register a webhook with more than one For", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testDefaultValidatorGVK.GroupVersion()} + builder.Register(&TestDefaulter{}, &TestDefaulterList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestDefaulter{}). + For(&TestDefaulter{}). + Complete() + Expect(err).To(HaveOccurred()) + }) + + It("should scaffold a custom defaulting and validating webhook", func(specCtx SpecContext) { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testValidatorGVK.GroupVersion()} + builder.Register(&TestDefaultValidator{}, &TestDefaultValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestDefaultValidator{}). + WithDefaulter(&TestCustomDefaultValidator{}). + WithValidator(&TestCustomDefaultValidator{}). + WithLogConstructor(func(base logr.Logger, req *admission.Request) logr.Logger { + return admission.DefaultLogConstructor(testingLogger, req) + }). 
+ Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(admissionReviewGV + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"foo.test.org", + "version":"v1", + "kind":"TestDefaultValidator" + }, + "resource":{ + "group":"foo.test.org", + "version":"v1", + "resource":"testdefaultvalidator" + }, + "namespace":"default", + "name":"foo", + "operation":"UPDATE", + "object":{ + "replica":1 + }, + "oldObject":{ + "replica":2 + } + } +}`) + + ctx, cancel := context.WithCancel(specCtx) + cancel() + err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a mutating webhook path") + path := generateMutatePath(testDefaultValidatorGVK) + req := httptest.NewRequest("POST", svcBaseAddr+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable fields") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":true`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"patch":`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":200`)) + EventuallyWithOffset(1, logBuffer).Should(gbytes.Say(`"msg":"Defaulting object","object":{"name":"foo","namespace":"default"},"namespace":"default","name":"foo","resource":{"group":"foo.test.org","version":"v1","resource":"testdefaultvalidator"},"user":"","requestID":"07e52e8d-4513-11e9-a716-42010a800270"`)) + + By("sending a request to a validating webhook path") + path = generateValidatePath(testDefaultValidatorGVK) + _, err = reader.Seek(0, 0) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req = httptest.NewRequest("POST", svcBaseAddr+path, reader) + req.Header.Add("Content-Type", 
"application/json") + w = httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable field") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":false`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":403`)) + EventuallyWithOffset(1, logBuffer).Should(gbytes.Say(`"msg":"Validating object","object":{"name":"foo","namespace":"default"},"namespace":"default","name":"foo","resource":{"group":"foo.test.org","version":"v1","resource":"testdefaultvalidator"},"user":"","requestID":"07e52e8d-4513-11e9-a716-42010a800270"`)) + }) + + It("should scaffold a custom defaulting and validating webhook with a custom path for each of them", func(specCtx SpecContext) { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testValidatorGVK.GroupVersion()} + builder.Register(&TestDefaultValidator{}, &TestDefaultValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + validatingCustomPath := "/custom-validating-path" + defaultingCustomPath := "/custom-defaulting-path" + err = WebhookManagedBy(m). + For(&TestDefaultValidator{}). + WithDefaulter(&TestCustomDefaultValidator{}). + WithValidator(&TestCustomDefaultValidator{}). + WithLogConstructor(func(base logr.Logger, req *admission.Request) logr.Logger { + return admission.DefaultLogConstructor(testingLogger, req) + }). + WithValidatorCustomPath(validatingCustomPath). + WithDefaulterCustomPath(defaultingCustomPath). 
+ Complete() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + svr := m.GetWebhookServer() + ExpectWithOffset(1, svr).NotTo(BeNil()) + + reader := strings.NewReader(admissionReviewGV + admissionReviewVersion + `", + "request":{ + "uid":"07e52e8d-4513-11e9-a716-42010a800270", + "kind":{ + "group":"foo.test.org", + "version":"v1", + "kind":"TestDefaultValidator" + }, + "resource":{ + "group":"foo.test.org", + "version":"v1", + "resource":"testdefaultvalidator" + }, + "namespace":"default", + "name":"foo", + "operation":"UPDATE", + "object":{ + "replica":1 + }, + "oldObject":{ + "replica":2 + } + } +}`) + + ctx, cancel := context.WithCancel(specCtx) + cancel() + err = svr.Start(ctx) + if err != nil && !os.IsNotExist(err) { + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + } + + By("sending a request to a mutating webhook path that have been overriten by the custom path") + path, err := generateCustomPath(defaultingCustomPath) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req := httptest.NewRequest("POST", svcBaseAddr+path, reader) + req.Header.Add("Content-Type", "application/json") + w := httptest.NewRecorder() + svr.WebhookMux().ServeHTTP(w, req) + ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK)) + By("sanity checking the response contains reasonable fields") + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":true`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"patch":`)) + ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":200`)) + EventuallyWithOffset(1, logBuffer).Should(gbytes.Say(`"msg":"Defaulting object","object":{"name":"foo","namespace":"default"},"namespace":"default","name":"foo","resource":{"group":"foo.test.org","version":"v1","resource":"testdefaultvalidator"},"user":"","requestID":"07e52e8d-4513-11e9-a716-42010a800270"`)) + + By("sending a request to a mutating webhook path") + path = generateMutatePath(testDefaultValidatorGVK) + _, err = reader.Seek(0, 0) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + req = 
httptest.NewRequest("POST", svcBaseAddr+path, reader)
+		req.Header.Add("Content-Type", "application/json")
+		w = httptest.NewRecorder()
+		svr.WebhookMux().ServeHTTP(w, req)
+		ExpectWithOffset(1, w.Code).To(Equal(http.StatusNotFound))
+
+		By("sending a request to a validating webhook path that has been overridden by a custom path")
+		path, err = generateCustomPath(validatingCustomPath)
+		ExpectWithOffset(1, err).NotTo(HaveOccurred())
+		_, err = reader.Seek(0, 0)
+		ExpectWithOffset(1, err).NotTo(HaveOccurred())
+		req = httptest.NewRequest("POST", svcBaseAddr+path, reader)
+		req.Header.Add("Content-Type", "application/json")
+		w = httptest.NewRecorder()
+		svr.WebhookMux().ServeHTTP(w, req)
+		ExpectWithOffset(1, w.Code).To(Equal(http.StatusOK))
+		By("sanity checking the response contains reasonable field")
+		ExpectWithOffset(1, w.Body).To(ContainSubstring(`"allowed":false`))
+		ExpectWithOffset(1, w.Body).To(ContainSubstring(`"code":403`))
+		EventuallyWithOffset(1, logBuffer).Should(gbytes.Say(`"msg":"Validating object","object":{"name":"foo","namespace":"default"},"namespace":"default","name":"foo","resource":{"group":"foo.test.org","version":"v1","resource":"testdefaultvalidator"},"user":"","requestID":"07e52e8d-4513-11e9-a716-42010a800270"`))
+
+		By("sending a request to a validating webhook path")
+		path = generateValidatePath(testValidatorGVK)
+		req = httptest.NewRequest("POST", svcBaseAddr+path, reader)
+		req.Header.Add("Content-Type", "application/json")
+		w = httptest.NewRecorder()
+		svr.WebhookMux().ServeHTTP(w, req)
+		ExpectWithOffset(1, w.Code).To(Equal(http.StatusNotFound))
+	})
+
+	It("should not scaffold a custom defaulting and a custom validating webhook with the same custom path", func() {
+		By("creating a controller manager")
+		m, err := manager.New(cfg, manager.Options{})
+		ExpectWithOffset(1, err).NotTo(HaveOccurred())
+
+		By("registering the type in the Scheme")
+		builder := scheme.Builder{GroupVersion: testValidatorGVK.GroupVersion()}
+
builder.Register(&TestDefaultValidator{}, &TestDefaultValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestDefaultValidator{}). + WithDefaulter(&TestCustomDefaultValidator{}). + WithValidator(&TestCustomDefaultValidator{}). + WithLogConstructor(func(base logr.Logger, req *admission.Request) logr.Logger { + return admission.DefaultLogConstructor(testingLogger, req) + }). + WithCustomPath(customPath). + Complete() + ExpectWithOffset(1, err).To(HaveOccurred()) + }) + + It("should not scaffold a custom defaulting when setting a custom path and a defaulting custom path", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testValidatorGVK.GroupVersion()} + builder.Register(&TestDefaultValidator{}, &TestDefaultValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestDefaulter{}). + WithDefaulter(&TestCustomDefaulter{}). + WithLogConstructor(func(base logr.Logger, req *admission.Request) logr.Logger { + return admission.DefaultLogConstructor(testingLogger, req) + }). + WithDefaulterCustomPath(customPath). + WithCustomPath(customPath). 
+ Complete() + ExpectWithOffset(1, err).To(HaveOccurred()) + }) + + It("should not scaffold a custom defaulting when setting a custom path and a validating custom path", func() { + By("creating a controller manager") + m, err := manager.New(cfg, manager.Options{}) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + By("registering the type in the Scheme") + builder := scheme.Builder{GroupVersion: testValidatorGVK.GroupVersion()} + builder.Register(&TestDefaultValidator{}, &TestDefaultValidatorList{}) + err = builder.AddToScheme(m.GetScheme()) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + + err = WebhookManagedBy(m). + For(&TestValidator{}). + WithValidator(&TestCustomValidator{}). + WithLogConstructor(func(base logr.Logger, req *admission.Request) logr.Logger { + return admission.DefaultLogConstructor(testingLogger, req) + }). + WithDefaulterCustomPath(customPath). + WithCustomPath(customPath). + Complete() + ExpectWithOffset(1, err).To(HaveOccurred()) + }) +} + +// TestDefaulter. +var _ runtime.Object = &TestDefaulter{} + +const testDefaulterKind = "TestDefaulter" + +type TestDefaulter struct { + Replica int `json:"replica,omitempty"` + Panic bool `json:"panic,omitempty"` +} + +var testDefaulterGVK = schema.GroupVersionKind{Group: "foo.test.org", Version: "v1", Kind: testDefaulterKind} + +func (d *TestDefaulter) GetObjectKind() schema.ObjectKind { return d } +func (d *TestDefaulter) DeepCopyObject() runtime.Object { + return &TestDefaulter{ + Replica: d.Replica, + } +} + +func (d *TestDefaulter) GroupVersionKind() schema.GroupVersionKind { + return testDefaulterGVK +} + +func (d *TestDefaulter) SetGroupVersionKind(gvk schema.GroupVersionKind) {} + +var _ runtime.Object = &TestDefaulterList{} + +type TestDefaulterList struct{} + +func (*TestDefaulterList) GetObjectKind() schema.ObjectKind { return nil } +func (*TestDefaulterList) DeepCopyObject() runtime.Object { return nil } + +// TestValidator. 
+var _ runtime.Object = &TestValidator{} + +const testValidatorKind = "TestValidator" + +type TestValidator struct { + Replica int `json:"replica,omitempty"` + Panic bool `json:"panic,omitempty"` +} + +var testValidatorGVK = schema.GroupVersionKind{Group: "foo.test.org", Version: "v1", Kind: testValidatorKind} + +func (v *TestValidator) GetObjectKind() schema.ObjectKind { return v } +func (v *TestValidator) DeepCopyObject() runtime.Object { + return &TestValidator{ + Replica: v.Replica, + } +} + +func (v *TestValidator) GroupVersionKind() schema.GroupVersionKind { + return testValidatorGVK +} + +func (v *TestValidator) SetGroupVersionKind(gvk schema.GroupVersionKind) {} + +var _ runtime.Object = &TestValidatorList{} + +type TestValidatorList struct{} + +func (*TestValidatorList) GetObjectKind() schema.ObjectKind { return nil } +func (*TestValidatorList) DeepCopyObject() runtime.Object { return nil } + +// TestDefaultValidator. +var _ runtime.Object = &TestDefaultValidator{} + +const testDefaultValidatorKind = "TestDefaultValidator" + +type TestDefaultValidator struct { + metav1.TypeMeta + metav1.ObjectMeta + + Replica int `json:"replica,omitempty"` +} + +var testDefaultValidatorGVK = schema.GroupVersionKind{Group: "foo.test.org", Version: "v1", Kind: "TestDefaultValidator"} + +func (dv *TestDefaultValidator) GetObjectKind() schema.ObjectKind { return dv } +func (dv *TestDefaultValidator) DeepCopyObject() runtime.Object { + return &TestDefaultValidator{ + Replica: dv.Replica, + } +} + +func (dv *TestDefaultValidator) GroupVersionKind() schema.GroupVersionKind { + return testDefaultValidatorGVK +} + +func (dv *TestDefaultValidator) SetGroupVersionKind(gvk schema.GroupVersionKind) {} + +var _ runtime.Object = &TestDefaultValidatorList{} + +type TestDefaultValidatorList struct{} + +func (*TestDefaultValidatorList) GetObjectKind() schema.ObjectKind { return nil } +func (*TestDefaultValidatorList) DeepCopyObject() runtime.Object { return nil } + +// TestCustomDefaulter. 
+type TestCustomDefaulter struct{} + +func (*TestCustomDefaulter) Default(ctx context.Context, obj runtime.Object) error { + logf.FromContext(ctx).Info("Defaulting object") + req, err := admission.RequestFromContext(ctx) + if err != nil { + return fmt.Errorf("expected admission.Request in ctx: %w", err) + } + if req.Kind.Kind != testDefaulterKind { + return fmt.Errorf("expected Kind TestDefaulter got %q", req.Kind.Kind) + } + + d := obj.(*TestDefaulter) //nolint:ifshort + if d.Panic { + panic("fake panic test") + } + + if d.Replica < 2 { + d.Replica = 2 + } + + return nil +} + +var _ admission.CustomDefaulter = &TestCustomDefaulter{} + +// TestCustomValidator. + +type TestCustomValidator struct{} + +func (*TestCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + logf.FromContext(ctx).Info("Validating object") + req, err := admission.RequestFromContext(ctx) + if err != nil { + return nil, fmt.Errorf("expected admission.Request in ctx: %w", err) + } + if req.Kind.Kind != testValidatorKind { + return nil, fmt.Errorf("expected Kind TestValidator got %q", req.Kind.Kind) + } + + v := obj.(*TestValidator) //nolint:ifshort + if v.Panic { + panic("fake panic test") + } + if v.Replica < 0 { + return nil, errors.New("number of replica should be greater than or equal to 0") + } + + return nil, nil +} + +func (*TestCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + logf.FromContext(ctx).Info("Validating object") + req, err := admission.RequestFromContext(ctx) + if err != nil { + return nil, fmt.Errorf("expected admission.Request in ctx: %w", err) + } + if req.Kind.Kind != testValidatorKind { + return nil, fmt.Errorf("expected Kind TestValidator got %q", req.Kind.Kind) + } + + v := newObj.(*TestValidator) + old := oldObj.(*TestValidator) + if v.Replica < 0 { + return nil, errors.New("number of replica should be greater than or equal to 0") + } + if v.Replica < 
old.Replica { + return nil, fmt.Errorf("new replica %v should not be fewer than old replica %v", v.Replica, old.Replica) + } + + userAgent, ok := ctx.Value(userAgentCtxKey).(string) + if ok && userAgent != userAgentValue { + return nil, fmt.Errorf("expected %s value is %q in TestCustomValidator got %q", userAgentCtxKey, userAgentValue, userAgent) + } + + return nil, nil +} + +func (*TestCustomValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + logf.FromContext(ctx).Info("Validating object") + req, err := admission.RequestFromContext(ctx) + if err != nil { + return nil, fmt.Errorf("expected admission.Request in ctx: %w", err) + } + if req.Kind.Kind != testValidatorKind { + return nil, fmt.Errorf("expected Kind TestValidator got %q", req.Kind.Kind) + } + + v := obj.(*TestValidator) //nolint:ifshort + if v.Replica > 0 { + return nil, errors.New("number of replica should be less than or equal to 0 to delete") + } + return nil, nil +} + +var _ admission.CustomValidator = &TestCustomValidator{} + +// TestCustomDefaultValidator for default +type TestCustomDefaultValidator struct{} + +func (*TestCustomDefaultValidator) Default(ctx context.Context, obj runtime.Object) error { + logf.FromContext(ctx).Info("Defaulting object") + req, err := admission.RequestFromContext(ctx) + if err != nil { + return fmt.Errorf("expected admission.Request in ctx: %w", err) + } + if req.Kind.Kind != testDefaultValidatorKind { + return fmt.Errorf("expected Kind TestDefaultValidator got %q", req.Kind.Kind) + } + + d := obj.(*TestDefaultValidator) //nolint:ifshort + + if d.Replica < 2 { + d.Replica = 2 + } + return nil +} + +var _ admission.CustomDefaulter = &TestCustomDefaulter{} + +// TestCustomDefaultValidator for validation + +func (*TestCustomDefaultValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + logf.FromContext(ctx).Info("Validating object") + req, err := admission.RequestFromContext(ctx) + if 
err != nil { + return nil, fmt.Errorf("expected admission.Request in ctx: %w", err) + } + if req.Kind.Kind != testDefaultValidatorKind { + return nil, fmt.Errorf("expected Kind TestDefaultValidator got %q", req.Kind.Kind) + } + + v := obj.(*TestDefaultValidator) //nolint:ifshort + if v.Replica < 0 { + return nil, errors.New("number of replica should be greater than or equal to 0") + } + return nil, nil +} + +func (*TestCustomDefaultValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + logf.FromContext(ctx).Info("Validating object") + req, err := admission.RequestFromContext(ctx) + if err != nil { + return nil, fmt.Errorf("expected admission.Request in ctx: %w", err) + } + if req.Kind.Kind != testDefaultValidatorKind { + return nil, fmt.Errorf("expected Kind TestDefaultValidator got %q", req.Kind.Kind) + } + + v := newObj.(*TestDefaultValidator) + old := oldObj.(*TestDefaultValidator) + if v.Replica < 0 { + return nil, errors.New("number of replica should be greater than or equal to 0") + } + if v.Replica < old.Replica { + return nil, fmt.Errorf("new replica %v should not be fewer than old replica %v", v.Replica, old.Replica) + } + return nil, nil +} + +func (*TestCustomDefaultValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + logf.FromContext(ctx).Info("Validating object") + req, err := admission.RequestFromContext(ctx) + if err != nil { + return nil, fmt.Errorf("expected admission.Request in ctx: %w", err) + } + if req.Kind.Kind != testDefaultValidatorKind { + return nil, fmt.Errorf("expected Kind TestDefaultValidator got %q", req.Kind.Kind) + } + + v := obj.(*TestDefaultValidator) //nolint:ifshort + if v.Replica > 0 { + return nil, errors.New("number of replica should be less than or equal to 0 to delete") + } + return nil, nil +} + +var _ admission.CustomValidator = &TestCustomValidator{} diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index 
03a86d3fc7..a7e491855a 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -17,103 +17,457 @@ limitations under the License. package cache import ( + "context" "fmt" + "maps" + "net/http" + "slices" + "sort" "time" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" toolscache "k8s.io/client-go/tools/cache" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/cache/internal" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) -var log = logf.RuntimeLog.WithName("object-cache") +var ( + defaultSyncPeriod = 10 * time.Hour +) + +// InformerGetOptions defines the behavior of how informers are retrieved. +type InformerGetOptions internal.GetOptions + +// InformerGetOption defines an option that alters the behavior of how informers are retrieved. +type InformerGetOption func(*InformerGetOptions) + +// BlockUntilSynced determines whether a get request for an informer should block +// until the informer's cache has synced. +func BlockUntilSynced(shouldBlock bool) InformerGetOption { + return func(opts *InformerGetOptions) { + opts.BlockUntilSynced = &shouldBlock + } +} // Cache knows how to load Kubernetes objects, fetch informers to request // to receive events for Kubernetes objects (at a low-level), -// and add indicies to fields on the objects stored in the cache. +// and add indices to fields on the objects stored in the cache. type Cache interface { - // Cache acts as a client to objects stored in the cache. + // Reader acts as a client to objects stored in the cache. client.Reader - // Cache loads informers and adds field indicies. + // Informers loads informers and adds field indices. 
Informers } // Informers knows how to create or fetch informers for different -// group-version-kinds, and add indicies to those informers. It's safe to call +// group-version-kinds, and add indices to those informers. It's safe to call // GetInformer from multiple threads. type Informers interface { // GetInformer fetches or constructs an informer for the given object that corresponds to a single // API kind and resource. - GetInformer(obj runtime.Object) (Informer, error) + GetInformer(ctx context.Context, obj client.Object, opts ...InformerGetOption) (Informer, error) // GetInformerForKind is similar to GetInformer, except that it takes a group-version-kind, instead // of the underlying object. - GetInformerForKind(gvk schema.GroupVersionKind) (Informer, error) + GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...InformerGetOption) (Informer, error) - // Start runs all the informers known to this cache until the given channel is closed. + // RemoveInformer removes an informer entry and stops it if it was running. + RemoveInformer(ctx context.Context, obj client.Object) error + + // Start runs all the informers known to this cache until the context is closed. // It blocks. - Start(stopCh <-chan struct{}) error + Start(ctx context.Context) error - // WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache. - WaitForCacheSync(stop <-chan struct{}) bool + // WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache. + WaitForCacheSync(ctx context.Context) bool - // Informers knows how to add indicies to the caches (informers) that it manages. + // FieldIndexer adds indices to the managed informers. client.FieldIndexer } -// Informer - informer allows you interact with the underlying informer +// Informer allows you to interact with the underlying informer. 
type Informer interface { // AddEventHandler adds an event handler to the shared informer using the shared informer's resync - // period. Events to a single handler are delivered sequentially, but there is no coordination + // period. Events to a single handler are delivered sequentially, but there is no coordination // between different handlers. - AddEventHandler(handler toolscache.ResourceEventHandler) + // It returns a registration handle for the handler that can be used to remove + // the handler again and an error if the handler cannot be added. + AddEventHandler(handler toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error) + // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the - // specified resync period. Events to a single handler are delivered sequentially, but there is + // specified resync period. Events to a single handler are delivered sequentially, but there is // no coordination between different handlers. - AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) - // AddIndexers adds more indexers to this store. If you call this after you already have data - // in the store, the results are undefined. + // It returns a registration handle for the handler that can be used to remove + // the handler again and an error if the handler cannot be added. + AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) (toolscache.ResourceEventHandlerRegistration, error) + + // AddEventHandlerWithOptions is a variant of AddEventHandlerWithResyncPeriod where + // all optional parameters are passed in as a struct. + AddEventHandlerWithOptions(handler toolscache.ResourceEventHandler, options toolscache.HandlerOptions) (toolscache.ResourceEventHandlerRegistration, error) + + // RemoveEventHandler removes a previously added event handler given by + // its registration handle. 
+ // This function is guaranteed to be idempotent and thread-safe. + RemoveEventHandler(handle toolscache.ResourceEventHandlerRegistration) error + + // AddIndexers adds indexers to this store. It is valid to add indexers + // after an informer was started. AddIndexers(indexers toolscache.Indexers) error - //HasSynced return true if the informers underlying store has synced + + // HasSynced return true if the informers underlying store has synced. HasSynced() bool + // IsStopped returns true if the informer has been stopped. + IsStopped() bool } -// Options are the optional arguments for creating a new InformersMap object +// AllNamespaces should be used as the map key to deliminate namespace settings +// that apply to all namespaces that themselves do not have explicit settings. +const AllNamespaces = metav1.NamespaceAll + +// Options are the optional arguments for creating a new Cache object. type Options struct { + // HTTPClient is the http client to use for the REST client + HTTPClient *http.Client + // Scheme is the scheme to use for mapping objects to GroupVersionKinds Scheme *runtime.Scheme // Mapper is the RESTMapper to use for mapping GroupVersionKinds to Resources Mapper meta.RESTMapper - // Resync is the resync period. Defaults to defaultResyncTime. - Resync *time.Duration + // SyncPeriod determines the minimum frequency at which watched resources are + // reconciled. A lower period will correct entropy more quickly, but reduce + // responsiveness to change if there are many watched resources. Change this + // value only if you know what you are doing. Defaults to 10 hours if unset. + // there will a 10 percent jitter between the SyncPeriod of all controllers + // so that all controllers will not send list requests simultaneously. + // + // This applies to all controllers. + // + // A period sync happens for two reasons: + // 1. To insure against a bug in the controller that causes an object to not + // be requeued, when it otherwise should be requeued. 
+ // 2. To insure against an unknown bug in controller-runtime, or its dependencies, + // that causes an object to not be requeued, when it otherwise should be + // requeued, or to be removed from the queue, when it otherwise should not + // be removed. + // + // If you want + // 1. to insure against missed watch events, or + // 2. to poll services that cannot be watched, + // then we recommend that, instead of changing the default period, the + // controller requeue, with a constant duration `t`, whenever the controller + // is "done" with an object, and would otherwise not requeue it, i.e., we + // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`, + // instead of `reconcile.Result{}`. + // + // SyncPeriod will locally trigger an artificial Update event with the same + // object in both ObjectOld and ObjectNew for everything that is in the + // cache. + // + // Predicates or Handlers that expect ObjectOld and ObjectNew to be different + // (such as GenerationChangedPredicate) will filter out this event, preventing + // it from triggering a reconciliation. + // SyncPeriod does not sync between the local cache and the server. + SyncPeriod *time.Duration + + // ReaderFailOnMissingInformer configures the cache to return a ErrResourceNotCached error when a user + // requests, using Get() and List(), a resource the cache does not already have an informer for. + // + // This error is distinct from an errors.NotFound. + // + // Defaults to false, which means that the cache will start a new informer + // for every new requested resource. + ReaderFailOnMissingInformer bool + + // DefaultNamespaces maps namespace names to cache configs. If set, only + // the namespaces in here will be watched and it will by used to default + // ByObject.Namespaces for all objects if that is nil. + // + // It is possible to have specific Config for just some namespaces + // but cache all namespaces by using the AllNamespaces const as the map key. 
+ // This will then include all namespaces that do not have a more specific + // setting. + // + // The options in the Config that are nil will be defaulted from + // the respective Default* settings. + DefaultNamespaces map[string]Config + + // DefaultLabelSelector will be used as a label selector for all objects + // unless there is already one set in ByObject or DefaultNamespaces. + DefaultLabelSelector labels.Selector + + // DefaultFieldSelector will be used as a field selector for all object types + // unless there is already one set in ByObject or DefaultNamespaces. + DefaultFieldSelector fields.Selector + + // DefaultTransform will be used as transform for all object types + // unless there is already one set in ByObject or DefaultNamespaces. + // + // A typical usecase for this is to use TransformStripManagedFields + // to reduce the caches memory usage. + DefaultTransform toolscache.TransformFunc + + // DefaultWatchErrorHandler will be used to set the WatchErrorHandler which is called + // whenever ListAndWatch drops the connection with an error. + // + // After calling this handler, the informer will backoff and retry. + DefaultWatchErrorHandler toolscache.WatchErrorHandlerWithContext - // Namespace restricts the cache's ListWatch to the desired namespace - // Default watches all namespaces - Namespace string + // DefaultUnsafeDisableDeepCopy is the default for UnsafeDisableDeepCopy + // for everything that doesn't specify this. + // + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + // + // This will be used for all object types, unless it is set in ByObject or + // DefaultNamespaces. + DefaultUnsafeDisableDeepCopy *bool + + // DefaultEnableWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. 
Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // + // This will be used for all object types, unless it is set in ByObject or + // DefaultNamespaces. + // + // Defaults to true. + DefaultEnableWatchBookmarks *bool + + // ByObject restricts the cache's ListWatch to the desired fields per GVK at the specified object. + // If unset, this will fall through to the Default* settings. + ByObject map[client.Object]ByObject + + // NewInformer allows overriding of NewSharedIndexInformer, for example for testing + // or if someone wants to write their own Informer. + NewInformer func(toolscache.ListerWatcher, runtime.Object, time.Duration, toolscache.Indexers) toolscache.SharedIndexInformer } -var defaultResyncTime = 10 * time.Hour +// ByObject offers more fine-grained control over the cache's ListWatch by object. +type ByObject struct { + // Namespaces maps a namespace name to cache configs. If set, only the + // namespaces in this map will be cached. + // + // Settings in the map value that are unset will be defaulted. + // Use an empty value for the specific setting to prevent that. + // + // It is possible to have specific Config for just some namespaces + // but cache all namespaces by using the AllNamespaces const as the map key. + // This will then include all namespaces that do not have a more specific + // setting. + // + // A nil map allows to default this to the cache's DefaultNamespaces setting. + // An empty map prevents this and means that all namespaces will be cached. + // + // The defaulting follows the following precedence order: + // 1. ByObject + // 2. DefaultNamespaces[namespace] + // 3. Default* + // + // This must be unset for cluster-scoped objects. + Namespaces map[string]Config + + // Label represents a label selector for the object. + Label labels.Selector + + // Field represents a field selector for the object. 
+ Field fields.Selector + + // Transform is a transformer function for the object which gets applied + // when objects of the transformation are about to be committed to the cache. + // + // This function is called both for new objects to enter the cache, + // and for updated objects. + Transform toolscache.TransformFunc + + // UnsafeDisableDeepCopy indicates not to deep copy objects during get or + // list objects per GVK at the specified object. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + UnsafeDisableDeepCopy *bool + + // EnableWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // + // Defaults to true. + EnableWatchBookmarks *bool +} + +// Config describes all potential options for a given watch. +type Config struct { + // LabelSelector specifies a label selector. A nil value allows to + // default this. + // + // Set to labels.Everything() if you don't want this defaulted. + LabelSelector labels.Selector + + // FieldSelector specifics a field selector. A nil value allows to + // default this. + // + // Set to fields.Everything() if you don't want this defaulted. + FieldSelector fields.Selector + + // Transform specifies a transform func. A nil value allows to default + // this. + // + // Set to an empty func to prevent this: + // func(in interface{}) (interface{}, error) { return in, nil } + Transform toolscache.TransformFunc + + // UnsafeDisableDeepCopy specifies if List and Get requests against the + // cache should not DeepCopy. A nil value allows to default this. 
+ UnsafeDisableDeepCopy *bool + + // EnableWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // + // Defaults to true. + EnableWatchBookmarks *bool +} + +// NewCacheFunc - Function for creating a new cache from the options and a rest config. +type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) // New initializes and returns a new Cache. -func New(config *rest.Config, opts Options) (Cache, error) { - opts, err := defaultOpts(config, opts) +func New(cfg *rest.Config, opts Options) (Cache, error) { + opts, err := defaultOpts(cfg, opts) if err != nil { return nil, err } - im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace) - return &informerCache{InformersMap: im}, nil + + newCacheFunc := newCache(cfg, opts) + + var defaultCache Cache + if len(opts.DefaultNamespaces) > 0 { + defaultConfig := optionDefaultsToConfig(&opts) + defaultCache = newMultiNamespaceCache(newCacheFunc, opts.Scheme, opts.Mapper, opts.DefaultNamespaces, &defaultConfig) + } else { + defaultCache = newCacheFunc(optionDefaultsToConfig(&opts), corev1.NamespaceAll) + } + + if len(opts.ByObject) == 0 { + return defaultCache, nil + } + + delegating := &delegatingByGVKCache{ + scheme: opts.Scheme, + caches: make(map[schema.GroupVersionKind]Cache, len(opts.ByObject)), + defaultCache: defaultCache, + } + + for obj, config := range opts.ByObject { + gvk, err := apiutil.GVKForObject(obj, opts.Scheme) + if err != nil { + return nil, fmt.Errorf("failed to get GVK for type %T: %w", obj, err) + } + var cache Cache + if len(config.Namespaces) > 0 { + cache = newMultiNamespaceCache(newCacheFunc, opts.Scheme, opts.Mapper, config.Namespaces, nil) + } else { + cache 
= newCacheFunc(byObjectToConfig(config), corev1.NamespaceAll) + } + delegating.caches[gvk] = cache + } + + return delegating, nil +} + +// TransformStripManagedFields strips the managed fields of an object before it is committed to the cache. +// If you are not explicitly accessing managedFields from your code, setting this as `DefaultTransform` +// on the cache can lead to a significant reduction in memory usage. +func TransformStripManagedFields() toolscache.TransformFunc { + return func(in any) (any, error) { + // Nilcheck managed fields to avoid hitting https://github.com/kubernetes/kubernetes/issues/124337 + if obj, err := meta.Accessor(in); err == nil && obj.GetManagedFields() != nil { + obj.SetManagedFields(nil) + } + + return in, nil + } +} + +func optionDefaultsToConfig(opts *Options) Config { + return Config{ + LabelSelector: opts.DefaultLabelSelector, + FieldSelector: opts.DefaultFieldSelector, + Transform: opts.DefaultTransform, + UnsafeDisableDeepCopy: opts.DefaultUnsafeDisableDeepCopy, + EnableWatchBookmarks: opts.DefaultEnableWatchBookmarks, + } +} + +func byObjectToConfig(byObject ByObject) Config { + return Config{ + LabelSelector: byObject.Label, + FieldSelector: byObject.Field, + Transform: byObject.Transform, + UnsafeDisableDeepCopy: byObject.UnsafeDisableDeepCopy, + EnableWatchBookmarks: byObject.EnableWatchBookmarks, + } +} + +type newCacheFunc func(config Config, namespace string) Cache + +func newCache(restConfig *rest.Config, opts Options) newCacheFunc { + return func(config Config, namespace string) Cache { + return &informerCache{ + scheme: opts.Scheme, + Informers: internal.NewInformers(restConfig, &internal.InformersOpts{ + HTTPClient: opts.HTTPClient, + Scheme: opts.Scheme, + Mapper: opts.Mapper, + ResyncPeriod: *opts.SyncPeriod, + Namespace: namespace, + Selector: internal.Selector{ + Label: config.LabelSelector, + Field: config.FieldSelector, + }, + Transform: config.Transform, + WatchErrorHandler: 
opts.DefaultWatchErrorHandler, + UnsafeDisableDeepCopy: ptr.Deref(config.UnsafeDisableDeepCopy, false), + EnableWatchBookmarks: ptr.Deref(config.EnableWatchBookmarks, true), + NewInformer: opts.NewInformer, + }), + readerFailOnMissingInformer: opts.ReaderFailOnMissingInformer, + } + } } func defaultOpts(config *rest.Config, opts Options) (Options, error) { + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // Use the rest HTTP client for the provided config if unset + if opts.HTTPClient == nil { + var err error + opts.HTTPClient, err = rest.HTTPClientFor(config) + if err != nil { + return Options{}, fmt.Errorf("could not create HTTP client from config: %w", err) + } + } + // Use the default Kubernetes Scheme if unset if opts.Scheme == nil { opts.Scheme = scheme.Scheme @@ -122,16 +476,126 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { // Construct a new Mapper if unset if opts.Mapper == nil { var err error - opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config) + opts.Mapper, err = apiutil.NewDynamicRESTMapper(config, opts.HTTPClient) if err != nil { - log.WithName("setup").Error(err, "Failed to get API Group-Resources") - return opts, fmt.Errorf("could not create RESTMapper from config") + return Options{}, fmt.Errorf("could not create RESTMapper from config: %w", err) } } + opts.ByObject = maps.Clone(opts.ByObject) + opts.DefaultNamespaces = maps.Clone(opts.DefaultNamespaces) + for obj, byObject := range opts.ByObject { + isNamespaced, err := apiutil.IsObjectNamespaced(obj, opts.Scheme, opts.Mapper) + if err != nil { + return opts, fmt.Errorf("failed to determine if %T is namespaced: %w", obj, err) + } + if !isNamespaced && byObject.Namespaces != nil { + return opts, fmt.Errorf("type %T is not namespaced, but its ByObject.Namespaces setting is not nil", obj) + } + + if isNamespaced && byObject.Namespaces == nil { + byObject.Namespaces = 
maps.Clone(opts.DefaultNamespaces) + } else { + byObject.Namespaces = maps.Clone(byObject.Namespaces) + } + + // Default the namespace-level configs first, because they need to use the undefaulted type-level config + // to be able to potentially fall through to settings from DefaultNamespaces. + for namespace, config := range byObject.Namespaces { + // 1. Default from the undefaulted type-level config + config = defaultConfig(config, byObjectToConfig(byObject)) + // 2. Default from the namespace-level config. This was defaulted from the global default config earlier, but + // might not have an entry for the current namespace. + if defaultNamespaceSettings, hasDefaultNamespace := opts.DefaultNamespaces[namespace]; hasDefaultNamespace { + config = defaultConfig(config, defaultNamespaceSettings) + } + + // 3. Default from the global defaults + config = defaultConfig(config, optionDefaultsToConfig(&opts)) + + if namespace == metav1.NamespaceAll { + config.FieldSelector = fields.AndSelectors( + appendIfNotNil( + namespaceAllSelector(slices.Collect(maps.Keys(byObject.Namespaces))), + config.FieldSelector, + )..., + ) + } + + byObject.Namespaces[namespace] = config + } + + // Only default ByObject iself if it isn't namespaced or has no namespaces configured, as only + // then any of this will be honored. 
+ if !isNamespaced || len(byObject.Namespaces) == 0 { + defaultedConfig := defaultConfig(byObjectToConfig(byObject), optionDefaultsToConfig(&opts)) + byObject.Label = defaultedConfig.LabelSelector + byObject.Field = defaultedConfig.FieldSelector + byObject.Transform = defaultedConfig.Transform + byObject.UnsafeDisableDeepCopy = defaultedConfig.UnsafeDisableDeepCopy + byObject.EnableWatchBookmarks = defaultedConfig.EnableWatchBookmarks + } + + opts.ByObject[obj] = byObject + } + + // Default namespaces after byObject has been defaulted, otherwise a namespace without selectors + // will get the `Default` selectors, then get copied to byObject and then not get defaulted from + // byObject, as it already has selectors. + for namespace, cfg := range opts.DefaultNamespaces { + cfg = defaultConfig(cfg, optionDefaultsToConfig(&opts)) + if namespace == metav1.NamespaceAll { + cfg.FieldSelector = fields.AndSelectors( + appendIfNotNil( + namespaceAllSelector(slices.Collect(maps.Keys(opts.DefaultNamespaces))), + cfg.FieldSelector, + )..., + ) + } + opts.DefaultNamespaces[namespace] = cfg + } + // Default the resync period to 10 hours if unset - if opts.Resync == nil { - opts.Resync = &defaultResyncTime + if opts.SyncPeriod == nil { + opts.SyncPeriod = &defaultSyncPeriod } return opts, nil } + +func defaultConfig(toDefault, defaultFrom Config) Config { + if toDefault.LabelSelector == nil { + toDefault.LabelSelector = defaultFrom.LabelSelector + } + if toDefault.FieldSelector == nil { + toDefault.FieldSelector = defaultFrom.FieldSelector + } + if toDefault.Transform == nil { + toDefault.Transform = defaultFrom.Transform + } + if toDefault.UnsafeDisableDeepCopy == nil { + toDefault.UnsafeDisableDeepCopy = defaultFrom.UnsafeDisableDeepCopy + } + if toDefault.EnableWatchBookmarks == nil { + toDefault.EnableWatchBookmarks = defaultFrom.EnableWatchBookmarks + } + return toDefault +} + +func namespaceAllSelector(namespaces []string) []fields.Selector { + selectors := 
make([]fields.Selector, 0, len(namespaces)-1) + sort.Strings(namespaces) + for _, namespace := range namespaces { + if namespace != metav1.NamespaceAll { + selectors = append(selectors, fields.OneTermNotEqualSelector("metadata.namespace", namespace)) + } + } + + return selectors +} + +func appendIfNotNil[T comparable](a []T, b T) []T { + if b != *new(T) { + return append(a, b) + } + return a +} diff --git a/pkg/cache/cache_suite_test.go b/pkg/cache/cache_suite_test.go index 537dad7a28..a9a5152ce8 100644 --- a/pkg/cache/cache_suite_test.go +++ b/pkg/cache/cache_suite_test.go @@ -19,7 +19,7 @@ package cache_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -30,15 +30,15 @@ import ( func TestSource(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Cache Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Cache Suite") } var testenv *envtest.Environment var cfg *rest.Config var clientset *kubernetes.Clientset -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) testenv = &envtest.Environment{} @@ -48,9 +48,7 @@ var _ = BeforeSuite(func(done Done) { clientset, err = kubernetes.NewForConfig(cfg) Expect(err).NotTo(HaveOccurred()) - - close(done) -}, 60) +}) var _ = AfterSuite(func() { Expect(testenv.Stop()).To(Succeed()) diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go index 9490e3ed7a..7748e2e317 100644 --- a/pkg/cache/cache_test.go +++ b/pkg/cache/cache_test.go @@ -18,134 +18,678 @@ package cache_test import ( "context" + "errors" "fmt" + "reflect" + "sort" + "strconv" + "strings" + "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - kcorev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - kmetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" kscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" kcache "k8s.io/client-go/tools/cache" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" ) +const testNodeOne = "test-node-1" +const testNodeTwo = "test-node-2" const testNamespaceOne = "test-namespace-1" const testNamespaceTwo = "test-namespace-2" const testNamespaceThree = "test-namespace-3" // TODO(community): Pull these helper functions into testenv. // Restart policy is included to allow indexing on that field. 
-func createPod(name, namespace string, restartPolicy kcorev1.RestartPolicy) runtime.Object { +func createPodWithLabels(ctx context.Context, name, namespace string, restartPolicy corev1.RestartPolicy, labels map[string]string) client.Object { three := int64(3) - pod := &kcorev1.Pod{ - ObjectMeta: kmetav1.ObjectMeta{ + if labels == nil { + labels = map[string]string{} + } + labels["test-label"] = name + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, - Labels: map[string]string{ - "test-label": name, - }, + Labels: labels, }, - Spec: kcorev1.PodSpec{ - Containers: []kcorev1.Container{{Name: "nginx", Image: "nginx"}}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}, RestartPolicy: restartPolicy, ActiveDeadlineSeconds: &three, }, } cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) - err = cl.Create(context.Background(), pod) + err = cl.Create(ctx, pod) Expect(err).NotTo(HaveOccurred()) return pod } -func deletePod(pod runtime.Object) { +func createSvc(ctx context.Context, name, namespace string, cl client.Client) client.Object { + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{Port: 1}}, + }, + } + err := cl.Create(ctx, svc) + Expect(err).NotTo(HaveOccurred()) + return svc +} + +func createSA(ctx context.Context, name, namespace string, cl client.Client) client.Object { + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + err := cl.Create(ctx, sa) + Expect(err).NotTo(HaveOccurred()) + return sa +} + +func createPod(ctx context.Context, name, namespace string, restartPolicy corev1.RestartPolicy) client.Object { + return createPodWithLabels(ctx, name, namespace, restartPolicy, nil) +} + +func deletePod(ctx context.Context, pod client.Object) { cl, err := client.New(cfg, client.Options{}) 
Expect(err).NotTo(HaveOccurred()) - err = cl.Delete(context.Background(), pod) + err = cl.Delete(ctx, pod) Expect(err).NotTo(HaveOccurred()) } var _ = Describe("Informer Cache", func() { - CacheTest(cache.New) + CacheTest(cache.New, cache.Options{}) + NonBlockingGetTest(cache.New, cache.Options{}) +}) + +var _ = Describe("Informer Cache with ReaderFailOnMissingInformer", func() { + CacheTestReaderFailOnMissingInformer(cache.New, cache.Options{ReaderFailOnMissingInformer: true}) +}) + +var _ = Describe("Multi-Namespace Informer Cache", func() { + CacheTest(cache.New, cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + cache.AllNamespaces: {FieldSelector: fields.OneTermEqualSelector("metadata.namespace", testNamespaceOne)}, + testNamespaceTwo: {}, + "default": {}, + }, + }) + NonBlockingGetTest(cache.New, cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + cache.AllNamespaces: {FieldSelector: fields.OneTermEqualSelector("metadata.namespace", testNamespaceOne)}, + testNamespaceTwo: {}, + "default": {}, + }, + }) +}) + +var _ = Describe("Informer Cache without global DeepCopy", func() { + CacheTest(cache.New, cache.Options{ + DefaultUnsafeDisableDeepCopy: ptr.To(true), + }) + NonBlockingGetTest(cache.New, cache.Options{ + DefaultUnsafeDisableDeepCopy: ptr.To(true), + }) +}) + +var _ = Describe("Cache with transformers", func() { + var ( + informerCache cache.Cache + informerCacheCancel context.CancelFunc + knownPod1 client.Object + knownPod2 client.Object + knownPod3 client.Object + knownPod4 client.Object + knownPod5 client.Object + knownPod6 client.Object + ) + + getTransformValue := func(obj client.Object) string { + accessor, err := meta.Accessor(obj) + if err == nil { + annotations := accessor.GetAnnotations() + if val, exists := annotations["transformed"]; exists { + return val + } + } + return "" + } + + BeforeEach(func(ctx SpecContext) { + var informerCacheCtx context.Context + // Has to be derived from context.Background as it has to stay 
valid past the + // BeforeEach. + informerCacheCtx, informerCacheCancel = context.WithCancel(context.Background()) //nolint:forbidigo + Expect(cfg).NotTo(BeNil()) + + By("creating three pods") + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + err = ensureNode(ctx, testNodeOne, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(ctx, testNamespaceOne, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(ctx, testNamespaceTwo, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(ctx, testNamespaceThree, cl) + Expect(err).NotTo(HaveOccurred()) + // Includes restart policy since these objects are indexed on this field. + knownPod1 = createPod(ctx, "test-pod-1", testNamespaceOne, corev1.RestartPolicyNever) + knownPod2 = createPod(ctx, "test-pod-2", testNamespaceTwo, corev1.RestartPolicyAlways) + knownPod3 = createPodWithLabels(ctx, "test-pod-3", testNamespaceTwo, corev1.RestartPolicyOnFailure, map[string]string{"common-label": "common"}) + knownPod4 = createPodWithLabels(ctx, "test-pod-4", testNamespaceThree, corev1.RestartPolicyNever, map[string]string{"common-label": "common"}) + knownPod5 = createPod(ctx, "test-pod-5", testNamespaceOne, corev1.RestartPolicyNever) + knownPod6 = createPod(ctx, "test-pod-6", testNamespaceTwo, corev1.RestartPolicyAlways) + + podGVK := schema.GroupVersionKind{ + Kind: "Pod", + Version: "v1", + } + + knownPod1.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod2.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod3.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod4.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod5.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod6.GetObjectKind().SetGroupVersionKind(podGVK) + + By("creating the informer cache") + informerCache, err = cache.New(cfg, cache.Options{ + DefaultTransform: func(i interface{}) (interface{}, error) { + obj := i.(runtime.Object) + Expect(obj).NotTo(BeNil()) + + accessor, err := 
meta.Accessor(obj) + Expect(err).ToNot(HaveOccurred()) + annotations := accessor.GetAnnotations() + + if _, exists := annotations["transformed"]; exists { + // Avoid performing transformation multiple times. + return i, nil + } + + if annotations == nil { + annotations = make(map[string]string) + } + annotations["transformed"] = "default" + accessor.SetAnnotations(annotations) + return i, nil + }, + ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: { + Transform: func(i interface{}) (interface{}, error) { + obj := i.(runtime.Object) + Expect(obj).NotTo(BeNil()) + accessor, err := meta.Accessor(obj) + Expect(err).ToNot(HaveOccurred()) + + annotations := accessor.GetAnnotations() + if _, exists := annotations["transformed"]; exists { + // Avoid performing transformation multiple times. + return i, nil + } + + if annotations == nil { + annotations = make(map[string]string) + } + annotations["transformed"] = "explicit" + accessor.SetAnnotations(annotations) + return i, nil + }, + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + By("running the cache and waiting for it to sync") + // pass as an arg so that we don't race between close and re-assign + go func(ctx context.Context) { + defer GinkgoRecover() + Expect(informerCache.Start(ctx)).To(Succeed()) + }(informerCacheCtx) + Expect(informerCache.WaitForCacheSync(ctx)).To(BeTrue()) + }) + + AfterEach(func(ctx SpecContext) { + By("cleaning up created pods") + deletePod(ctx, knownPod1) + deletePod(ctx, knownPod2) + deletePod(ctx, knownPod3) + deletePod(ctx, knownPod4) + deletePod(ctx, knownPod5) + deletePod(ctx, knownPod6) + + informerCacheCancel() + }) + + Context("with structured objects", func() { + It("should apply transformers to explicitly specified GVKS", func(ctx SpecContext) { + By("listing pods") + out := corev1.PodList{} + Expect(informerCache.List(ctx, &out)).To(Succeed()) + + By("verifying that the returned pods were transformed") + for i := 0; i < len(out.Items); i++ { + 
Expect(getTransformValue(&out.Items[i])).To(BeIdenticalTo("explicit")) + } + }) + + It("should apply default transformer to objects when none is specified", func(ctx SpecContext) { + By("getting the Kubernetes service") + svc := &corev1.Service{} + svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} + Expect(informerCache.Get(ctx, svcKey, svc)).To(Succeed()) + + By("verifying that the returned service was transformed") + Expect(getTransformValue(svc)).To(BeIdenticalTo("default")) + }) + }) + + Context("with unstructured objects", func() { + It("should apply transformers to explicitly specified GVKS", func(ctx SpecContext) { + By("listing pods") + out := unstructured.UnstructuredList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + Expect(informerCache.List(ctx, &out)).To(Succeed()) + + By("verifying that the returned pods were transformed") + for i := 0; i < len(out.Items); i++ { + Expect(getTransformValue(&out.Items[i])).To(BeIdenticalTo("explicit")) + } + }) + + It("should apply default transformer to objects when none is specified", func(ctx SpecContext) { + By("getting the Kubernetes service") + svc := &unstructured.Unstructured{} + svc.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }) + svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} + Expect(informerCache.Get(ctx, svcKey, svc)).To(Succeed()) + + By("verifying that the returned service was transformed") + Expect(getTransformValue(svc)).To(BeIdenticalTo("default")) + }) + }) + + Context("with metadata-only objects", func() { + It("should apply transformers to explicitly specified GVKS", func(ctx SpecContext) { + By("listing pods") + out := metav1.PartialObjectMetadataList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + Expect(informerCache.List(ctx, &out)).To(Succeed()) + + By("verifying that the returned 
pods were transformed") + for i := 0; i < len(out.Items); i++ { + Expect(getTransformValue(&out.Items[i])).To(BeIdenticalTo("explicit")) + } + }) + It("should apply default transformer to objects when none is specified", func(ctx SpecContext) { + By("getting the Kubernetes service") + svc := &metav1.PartialObjectMetadata{} + svc.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }) + svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} + Expect(informerCache.Get(ctx, svcKey, svc)).To(Succeed()) + + By("verifying that the returned service was transformed") + Expect(getTransformValue(svc)).To(BeIdenticalTo("default")) + }) + }) }) -var _ = Describe("Multi-Namesapce Informer Cache", func() { - CacheTest(cache.MultiNamespacedCacheBuilder([]string{testNamespaceOne, testNamespaceTwo, "default"})) + +var _ = Describe("Cache with selectors", func() { + defer GinkgoRecover() + var ( + informerCache cache.Cache + informerCacheCancel context.CancelFunc + ) + + BeforeEach(func(ctx SpecContext) { + var informerCacheCtx context.Context + // Has to be derived from context.Background as it has to stay valid past the + // BeforeEach. 
+ informerCacheCtx, informerCacheCancel = context.WithCancel(context.Background()) //nolint:forbidigo + Expect(cfg).NotTo(BeNil()) + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(ctx, testNamespaceOne, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(ctx, testNamespaceTwo, cl) + Expect(err).NotTo(HaveOccurred()) + for idx, namespace := range []string{testNamespaceOne, testNamespaceTwo} { + _ = createSA(ctx, "test-sa-"+strconv.Itoa(idx), namespace, cl) + _ = createSvc(ctx, "test-svc-"+strconv.Itoa(idx), namespace, cl) + } + + opts := cache.Options{ + DefaultFieldSelector: fields.OneTermEqualSelector("metadata.namespace", testNamespaceTwo), + ByObject: map[client.Object]cache.ByObject{ + &corev1.ServiceAccount{}: { + Field: fields.OneTermEqualSelector("metadata.namespace", testNamespaceOne), + }, + }, + } + + By("creating the informer cache") + informerCache, err = cache.New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + By("running the cache and waiting for it to sync") + // pass as an arg so that we don't race between close and re-assign + go func(ctx context.Context) { + defer GinkgoRecover() + Expect(informerCache.Start(ctx)).To(Succeed()) + }(informerCacheCtx) + Expect(informerCache.WaitForCacheSync(informerCacheCtx)).To(BeTrue()) + }) + + AfterEach(func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + for idx, namespace := range []string{testNamespaceOne, testNamespaceTwo} { + err = cl.Delete(ctx, &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "test-sa-" + strconv.Itoa(idx)}}) + Expect(err).NotTo(HaveOccurred()) + err = cl.Delete(ctx, &corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "test-svc-" + strconv.Itoa(idx)}}) + Expect(err).NotTo(HaveOccurred()) + } + informerCacheCancel() + }) + + It("Should list serviceaccounts and find exactly one in namespace "+testNamespaceOne, 
func(ctx SpecContext) { + var sas corev1.ServiceAccountList + err := informerCache.List(ctx, &sas) + Expect(err).NotTo(HaveOccurred()) + Expect(sas.Items).To(HaveLen(1)) + Expect(sas.Items[0].Namespace).To(Equal(testNamespaceOne)) + }) + + It("Should list services and find exactly one in namespace "+testNamespaceTwo, func(ctx SpecContext) { + var svcs corev1.ServiceList + err := informerCache.List(ctx, &svcs) + Expect(err).NotTo(HaveOccurred()) + Expect(svcs.Items).To(HaveLen(1)) + Expect(svcs.Items[0].Namespace).To(Equal(testNamespaceTwo)) + }) }) -// nolint: gocyclo -func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (cache.Cache, error)) { +func CacheTestReaderFailOnMissingInformer(createCacheFunc func(config *rest.Config, opts cache.Options) (cache.Cache, error), opts cache.Options) { + Describe("Cache test with ReaderFailOnMissingInformer = true", func() { + var ( + informerCache cache.Cache + informerCacheCancel context.CancelFunc + errNotCached *cache.ErrResourceNotCached + ) + + BeforeEach(func(ctx SpecContext) { + var informerCacheCtx context.Context + // Has to be derived from context.Background as it has to stay valid past the + // BeforeEach. 
+ informerCacheCtx, informerCacheCancel = context.WithCancel(context.Background()) //nolint:forbidigo + Expect(cfg).NotTo(BeNil()) + By("creating the informer cache") + var err error + informerCache, err = createCacheFunc(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + By("running the cache and waiting for it to sync") + // pass as an arg so that we don't race between close and re-assign + go func(ctx context.Context) { + defer GinkgoRecover() + Expect(informerCache.Start(ctx)).To(Succeed()) + }(informerCacheCtx) + Expect(informerCache.WaitForCacheSync(ctx)).To(BeTrue()) + }) + + AfterEach(func() { + informerCacheCancel() + }) + + Describe("as a Reader", func() { + Context("with structured objects", func() { + It("should not be able to list objects that haven't been watched previously", func(ctx SpecContext) { + By("listing all services in the cluster") + listObj := &corev1.ServiceList{} + Expect(errors.As(informerCache.List(ctx, listObj), &errNotCached)).To(BeTrue()) + }) + + It("should not be able to get objects that haven't been watched previously", func(ctx SpecContext) { + By("getting the Kubernetes service") + svc := &corev1.Service{} + svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} + Expect(errors.As(informerCache.Get(ctx, svcKey, svc), &errNotCached)).To(BeTrue()) + }) + + It("should be able to list objects that are configured to be watched", func(ctx SpecContext) { + By("indicating that we need to watch services") + _, err := informerCache.GetInformer(ctx, &corev1.Service{}) + Expect(err).ToNot(HaveOccurred()) + + By("listing all services in the cluster") + svcList := &corev1.ServiceList{} + Expect(informerCache.List(ctx, svcList)).To(Succeed()) + + By("verifying that the returned service looks reasonable") + Expect(svcList.Items).To(HaveLen(1)) + Expect(svcList.Items[0].Name).To(Equal("kubernetes")) + Expect(svcList.Items[0].Namespace).To(Equal("default")) + }) + + It("should be able to get objects that are configured to be 
watched", func(ctx SpecContext) { + By("indicating that we need to watch services") + _, err := informerCache.GetInformer(ctx, &corev1.Service{}) + Expect(err).ToNot(HaveOccurred()) + + By("getting the Kubernetes service") + svc := &corev1.Service{} + svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} + Expect(informerCache.Get(ctx, svcKey, svc)).To(Succeed()) + + By("verifying that the returned service looks reasonable") + Expect(svc.Name).To(Equal("kubernetes")) + Expect(svc.Namespace).To(Equal("default")) + }) + }) + }) + }) +} + +func NonBlockingGetTest(createCacheFunc func(config *rest.Config, opts cache.Options) (cache.Cache, error), opts cache.Options) { + Describe("non-blocking get test", func() { + var ( + informerCache cache.Cache + informerCacheCancel context.CancelFunc + ) + BeforeEach(func(ctx SpecContext) { + var informerCacheCtx context.Context + // Has to be derived from context.Background as it has to stay valid past the + // BeforeEach. + informerCacheCtx, informerCacheCancel = context.WithCancel(context.Background()) //nolint:forbidigo + Expect(cfg).NotTo(BeNil()) + + By("creating expected namespaces") + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + err = ensureNode(ctx, testNodeOne, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(ctx, testNamespaceOne, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(ctx, testNamespaceTwo, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(ctx, testNamespaceThree, cl) + Expect(err).NotTo(HaveOccurred()) + + By("creating the informer cache") + opts.NewInformer = func(_ kcache.ListerWatcher, _ runtime.Object, _ time.Duration, _ kcache.Indexers) kcache.SharedIndexInformer { + return &controllertest.FakeInformer{Synced: false} + } + informerCache, err = createCacheFunc(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + By("running the cache and waiting for it to sync") + // pass as an arg so that we don't race between 
close and re-assign + go func(ctx context.Context) { + defer GinkgoRecover() + Expect(informerCache.Start(ctx)).To(Succeed()) + }(informerCacheCtx) + Expect(informerCache.WaitForCacheSync(ctx)).To(BeTrue()) + }) + + AfterEach(func() { + By("cleaning up created pods") + informerCacheCancel() + }) + + Describe("as an Informer", func() { + It("should be able to get informer for the object without blocking", func(specCtx SpecContext) { + By("getting a shared index informer for a pod") + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "informer-obj", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + } + + ctx, cancel := context.WithTimeout(specCtx, 5*time.Second) + defer cancel() + sii, err := informerCache.GetInformer(ctx, pod, cache.BlockUntilSynced(false)) + Expect(err).NotTo(HaveOccurred()) + Expect(sii).NotTo(BeNil()) + Expect(sii.HasSynced()).To(BeFalse()) + }) + }) + }) +} + +func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (cache.Cache, error), opts cache.Options) { Describe("Cache test", func() { var ( - informerCache cache.Cache - stop chan struct{} - knownPod1 runtime.Object - knownPod2 runtime.Object - knownPod3 runtime.Object - knownPod4 runtime.Object + informerCache cache.Cache + informerCacheCancel context.CancelFunc + knownPod1 client.Object + knownPod2 client.Object + knownPod3 client.Object + knownPod4 client.Object + knownPod5 client.Object + knownPod6 client.Object ) - BeforeEach(func() { - stop = make(chan struct{}) + BeforeEach(func(ctx SpecContext) { + var informerCacheCtx context.Context + // Has to be derived from context.Background as it has to stay valid past the + // BeforeEach. 
+ informerCacheCtx, informerCacheCancel = context.WithCancel(context.Background()) //nolint:forbidigo Expect(cfg).NotTo(BeNil()) By("creating three pods") + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + err = ensureNode(ctx, testNodeOne, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNode(ctx, testNodeTwo, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(ctx, testNamespaceOne, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(ctx, testNamespaceTwo, cl) + Expect(err).NotTo(HaveOccurred()) + err = ensureNamespace(ctx, testNamespaceThree, cl) + Expect(err).NotTo(HaveOccurred()) // Includes restart policy since these objects are indexed on this field. - knownPod1 = createPod("test-pod-1", testNamespaceOne, kcorev1.RestartPolicyNever) - knownPod2 = createPod("test-pod-2", testNamespaceTwo, kcorev1.RestartPolicyAlways) - knownPod3 = createPod("test-pod-3", testNamespaceTwo, kcorev1.RestartPolicyOnFailure) - knownPod4 = createPod("test-pod-4", testNamespaceThree, kcorev1.RestartPolicyNever) + knownPod1 = createPod(ctx, "test-pod-1", testNamespaceOne, corev1.RestartPolicyNever) + knownPod2 = createPod(ctx, "test-pod-2", testNamespaceTwo, corev1.RestartPolicyAlways) + knownPod3 = createPodWithLabels(ctx, "test-pod-3", testNamespaceTwo, corev1.RestartPolicyOnFailure, map[string]string{"common-label": "common"}) + knownPod4 = createPodWithLabels(ctx, "test-pod-4", testNamespaceThree, corev1.RestartPolicyNever, map[string]string{"common-label": "common"}) + knownPod5 = createPod(ctx, "test-pod-5", testNamespaceOne, corev1.RestartPolicyNever) + knownPod6 = createPod(ctx, "test-pod-6", testNamespaceTwo, corev1.RestartPolicyAlways) + podGVK := schema.GroupVersionKind{ Kind: "Pod", Version: "v1", } + knownPod1.GetObjectKind().SetGroupVersionKind(podGVK) knownPod2.GetObjectKind().SetGroupVersionKind(podGVK) knownPod3.GetObjectKind().SetGroupVersionKind(podGVK) 
knownPod4.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod5.GetObjectKind().SetGroupVersionKind(podGVK) + knownPod6.GetObjectKind().SetGroupVersionKind(podGVK) By("creating the informer cache") - var err error - informerCache, err = createCacheFunc(cfg, cache.Options{}) + informerCache, err = createCacheFunc(cfg, opts) Expect(err).NotTo(HaveOccurred()) By("running the cache and waiting for it to sync") - go func() { + // pass as an arg so that we don't race between close and re-assign + go func(ctx context.Context) { defer GinkgoRecover() - Expect(informerCache.Start(stop)).To(Succeed()) - }() - Expect(informerCache.WaitForCacheSync(stop)).To(BeTrue()) + Expect(informerCache.Start(ctx)).To(Succeed()) + }(informerCacheCtx) + Expect(informerCache.WaitForCacheSync(ctx)).To(BeTrue()) }) - AfterEach(func() { + AfterEach(func(ctx SpecContext) { By("cleaning up created pods") - deletePod(knownPod1) - deletePod(knownPod2) - deletePod(knownPod3) - deletePod(knownPod4) - - close(stop) + deletePod(ctx, knownPod1) + deletePod(ctx, knownPod2) + deletePod(ctx, knownPod3) + deletePod(ctx, knownPod4) + deletePod(ctx, knownPod5) + deletePod(ctx, knownPod6) + + informerCacheCancel() }) Describe("as a Reader", func() { Context("with structured objects", func() { - - It("should be able to list objects that haven't been watched previously", func() { + It("should be able to list objects that haven't been watched previously", func(ctx SpecContext) { By("listing all services in the cluster") - listObj := &kcorev1.ServiceList{} - Expect(informerCache.List(context.Background(), listObj)).To(Succeed()) + listObj := &corev1.ServiceList{} + Expect(informerCache.List(ctx, listObj)).To(Succeed()) By("verifying that the returned list contains the Kubernetes service") // NB: kubernetes default service is automatically created in testenv. 
Expect(listObj.Items).NotTo(BeEmpty()) hasKubeService := false - for _, svc := range listObj.Items { - if svc.Namespace == "default" && svc.Name == "kubernetes" { + for i := range listObj.Items { + svc := &listObj.Items[i] + if isKubeService(svc) { hasKubeService = true break } @@ -153,22 +697,22 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Expect(hasKubeService).To(BeTrue()) }) - It("should be able to get objects that haven't been watched previously", func() { + It("should be able to get objects that haven't been watched previously", func(ctx SpecContext) { By("getting the Kubernetes service") - svc := &kcorev1.Service{} + svc := &corev1.Service{} svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} - Expect(informerCache.Get(context.Background(), svcKey, svc)).To(Succeed()) + Expect(informerCache.Get(ctx, svcKey, svc)).To(Succeed()) By("verifying that the returned service looks reasonable") Expect(svc.Name).To(Equal("kubernetes")) Expect(svc.Namespace).To(Equal("default")) }) - It("should support filtering by labels in a single namespace", func() { + It("should support filtering by labels in a single namespace", func(ctx SpecContext) { By("listing pods with a particular label") // NB: each pod has a "test-label": - out := kcorev1.PodList{} - Expect(informerCache.List(context.Background(), &out, + out := corev1.PodList{} + Expect(informerCache.List(ctx, &out, client.InNamespace(testNamespaceTwo), client.MatchingLabels(map[string]string{"test-label": "test-pod-2"}))).To(Succeed()) @@ -179,16 +723,16 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Expect(actual.Labels["test-label"]).To(Equal("test-pod-2")) }) - It("should support filtering by labels from multiple namespaces", func() { + It("should support filtering by labels from multiple namespaces", func(ctx SpecContext) { By("creating another pod with the same label but different namespace") - anotherPod := 
createPod("test-pod-2", testNamespaceOne, kcorev1.RestartPolicyAlways) - defer deletePod(anotherPod) + anotherPod := createPod(ctx, "test-pod-2", testNamespaceOne, corev1.RestartPolicyAlways) + defer deletePod(ctx, anotherPod) By("listing pods with a particular label") // NB: each pod has a "test-label": - out := kcorev1.PodList{} + out := corev1.PodList{} labels := map[string]string{"test-label": "test-pod-2"} - Expect(informerCache.List(context.Background(), &out, client.MatchingLabels(labels))).To(Succeed()) + Expect(informerCache.List(ctx, &out, client.MatchingLabels(labels))).To(Succeed()) By("verifying multiple pods with the same label in different namespaces are returned") Expect(out.Items).NotTo(BeEmpty()) @@ -198,71 +742,158 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca } }) - It("should be able to list objects with GVK populated", func() { - By("listing pods") - out := &kcorev1.PodList{} - Expect(informerCache.List(context.Background(), out)).To(Succeed()) - - By("verifying that the returned pods have GVK populated") - Expect(out.Items).NotTo(BeEmpty()) - Expect(out.Items).Should(SatisfyAny(HaveLen(3), HaveLen(4))) - for _, p := range out.Items { - Expect(p.GroupVersionKind()).To(Equal(kcorev1.SchemeGroupVersion.WithKind("Pod"))) - } - }) + if !isPodDisableDeepCopy(opts) { + It("should be able to list objects with GVK populated", func(ctx SpecContext) { + By("listing pods") + out := &corev1.PodList{} + Expect(informerCache.List(ctx, out)).To(Succeed()) + + By("verifying that the returned pods have GVK populated") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(SatisfyAny(HaveLen(5), HaveLen(6))) + for _, p := range out.Items { + Expect(p.GroupVersionKind()).To(Equal(corev1.SchemeGroupVersion.WithKind("Pod"))) + } + }) + } - It("should be able to list objects by namespace", func() { + It("should be able to list objects by namespace", func(ctx SpecContext) { By("listing pods in test-namespace-1") - 
listObj := &kcorev1.PodList{} - Expect(informerCache.List(context.Background(), listObj, + listObj := &corev1.PodList{} + Expect(informerCache.List(ctx, listObj, client.InNamespace(testNamespaceOne))).To(Succeed()) By("verifying that the returned pods are in test-namespace-1") Expect(listObj.Items).NotTo(BeEmpty()) - Expect(listObj.Items).Should(HaveLen(1)) - actual := listObj.Items[0] - Expect(actual.Namespace).To(Equal(testNamespaceOne)) + Expect(listObj.Items).Should(HaveLen(2)) + for _, item := range listObj.Items { + Expect(item.Namespace).To(Equal(testNamespaceOne)) + } }) - It("should deep copy the object unless told otherwise", func() { - By("retrieving a specific pod from the cache") - out := &kcorev1.Pod{} - podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo} - Expect(informerCache.Get(context.Background(), podKey, out)).To(Succeed()) + if !isPodDisableDeepCopy(opts) { + It("should deep copy the object unless told otherwise", func(ctx SpecContext) { + By("retrieving a specific pod from the cache") + out := &corev1.Pod{} + podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo} + Expect(informerCache.Get(ctx, podKey, out)).To(Succeed()) - By("verifying the retrieved pod is equal to a known pod") - Expect(out).To(Equal(knownPod2)) + By("verifying the retrieved pod is equal to a known pod") + Expect(out).To(Equal(knownPod2)) - By("altering a field in the retrieved pod") - *out.Spec.ActiveDeadlineSeconds = 4 + By("altering a field in the retrieved pod") + *out.Spec.ActiveDeadlineSeconds = 4 - By("verifying the pods are no longer equal") - Expect(out).NotTo(Equal(knownPod2)) - }) + By("verifying the pods are no longer equal") + Expect(out).NotTo(Equal(knownPod2)) + }) + } else { + It("should not deep copy the object if UnsafeDisableDeepCopy is enabled", func(ctx SpecContext) { + By("getting a specific pod from the cache twice") + podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo} + out1 := 
&corev1.Pod{} + Expect(informerCache.Get(ctx, podKey, out1)).To(Succeed()) + out2 := &corev1.Pod{} + Expect(informerCache.Get(ctx, podKey, out2)).To(Succeed()) + + By("verifying the pointer fields in pod have the same addresses") + Expect(out1).To(Equal(out2)) + Expect(reflect.ValueOf(out1.Labels).Pointer()).To(BeIdenticalTo(reflect.ValueOf(out2.Labels).Pointer())) + + By("listing pods from the cache twice") + outList1 := &corev1.PodList{} + Expect(informerCache.List(ctx, outList1, client.InNamespace(testNamespaceOne))).To(Succeed()) + outList2 := &corev1.PodList{} + Expect(informerCache.List(ctx, outList2, client.InNamespace(testNamespaceOne))).To(Succeed()) + + By("verifying the pointer fields in pod have the same addresses") + Expect(outList1.Items).To(HaveLen(len(outList2.Items))) + sort.SliceStable(outList1.Items, func(i, j int) bool { return outList1.Items[i].Name <= outList1.Items[j].Name }) + sort.SliceStable(outList2.Items, func(i, j int) bool { return outList2.Items[i].Name <= outList2.Items[j].Name }) + for i := range outList1.Items { + a := &outList1.Items[i] + b := &outList2.Items[i] + Expect(a).To(Equal(b)) + Expect(reflect.ValueOf(a.Labels).Pointer()).To(BeIdenticalTo(reflect.ValueOf(b.Labels).Pointer())) + } + }) + } - It("should return an error if the object is not found", func() { + It("should return an error if the object is not found", func(ctx SpecContext) { By("getting a service that does not exists") - svc := &kcorev1.Service{} + svc := &corev1.Service{} svcKey := client.ObjectKey{Namespace: testNamespaceOne, Name: "unknown"} By("verifying that an error is returned") - err := informerCache.Get(context.Background(), svcKey, svc) + err := informerCache.Get(ctx, svcKey, svc) Expect(err).To(HaveOccurred()) - Expect(errors.IsNotFound(err)).To(BeTrue()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) }) - It("should return an error if getting object in unwatched namespace", func() { + It("should return an error if getting object in unwatched 
namespace", func(ctx SpecContext) { By("getting a service that does not exists") - svc := &kcorev1.Service{} + svc := &corev1.Service{} svcKey := client.ObjectKey{Namespace: "unknown", Name: "unknown"} By("verifying that an error is returned") - err := informerCache.Get(context.Background(), svcKey, svc) + err := informerCache.Get(ctx, svcKey, svc) + Expect(err).To(HaveOccurred()) + }) + + It("should return an error when context is cancelled", func(specCtx SpecContext) { + By("cancelling the context") + ctx := cancelledCtx(specCtx) + + By("listing pods in test-namespace-1 with a cancelled context") + listObj := &corev1.PodList{} + err := informerCache.List(ctx, listObj, client.InNamespace(testNamespaceOne)) + + By("verifying that an error is returned") + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsTimeout(err)).To(BeTrue()) + }) + + It("should set the Limit option and limit number of objects to Limit when List is called", func(ctx SpecContext) { + opts := &client.ListOptions{Limit: int64(3)} + By("verifying that only Limit (3) number of objects are retrieved from the cache") + listObj := &corev1.PodList{} + Expect(informerCache.List(ctx, listObj, opts)).To(Succeed()) + Expect(listObj.Items).Should(HaveLen(3)) + }) + + It("should return a limited result set matching the correct label", func(ctx SpecContext) { + listObj := &corev1.PodList{} + labelOpt := client.MatchingLabels(map[string]string{"common-label": "common"}) + limitOpt := client.Limit(1) + By("verifying that only Limit (1) number of objects are retrieved from the cache") + Expect(informerCache.List(ctx, listObj, labelOpt, limitOpt)).To(Succeed()) + Expect(listObj.Items).Should(HaveLen(1)) + }) + + It("should return an error if pagination is used", func(ctx SpecContext) { + listObj := &corev1.PodList{} + By("verifying that the first list works and returns a sentinel continue") + err := informerCache.List(ctx, listObj) + Expect(err).ToNot(HaveOccurred()) + 
Expect(listObj.Continue).To(Equal("continue-not-supported")) + + By("verifying that an error is returned") + err = informerCache.List(ctx, listObj, client.Continue(listObj.Continue)) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("continue list option is not supported by the cache")) + }) + + It("should return an error if the continue list options is set", func(ctx SpecContext) { + listObj := &corev1.PodList{} + continueOpt := client.Continue("token") + By("verifying that an error is returned") + err := informerCache.List(ctx, listObj, continueOpt) Expect(err).To(HaveOccurred()) }) }) + Context("with unstructured objects", func() { - It("should be able to list objects that haven't been watched previously", func() { + It("should be able to list objects that haven't been watched previously", func(ctx SpecContext) { By("listing all services in the cluster") listObj := &unstructured.UnstructuredList{} listObj.SetGroupVersionKind(schema.GroupVersionKind{ @@ -270,22 +901,23 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Version: "v1", Kind: "ServiceList", }) - err := informerCache.List(context.Background(), listObj) + err := informerCache.List(ctx, listObj) Expect(err).To(Succeed()) By("verifying that the returned list contains the Kubernetes service") // NB: kubernetes default service is automatically created in testenv. 
Expect(listObj.Items).NotTo(BeEmpty()) hasKubeService := false - for _, svc := range listObj.Items { - if svc.GetNamespace() == "default" && svc.GetName() == "kubernetes" { + for i := range listObj.Items { + svc := &listObj.Items[i] + if isKubeService(svc) { hasKubeService = true break } } Expect(hasKubeService).To(BeTrue()) }) - It("should be able to get objects that haven't been watched previously", func() { + It("should be able to get objects that haven't been watched previously", func(ctx SpecContext) { By("getting the Kubernetes service") svc := &unstructured.Unstructured{} svc.SetGroupVersionKind(schema.GroupVersionKind{ @@ -294,14 +926,14 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Kind: "Service", }) svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} - Expect(informerCache.Get(context.Background(), svcKey, svc)).To(Succeed()) + Expect(informerCache.Get(ctx, svcKey, svc)).To(Succeed()) By("verifying that the returned service looks reasonable") Expect(svc.GetName()).To(Equal("kubernetes")) Expect(svc.GetNamespace()).To(Equal("default")) }) - It("should support filtering by labels in a single namespace", func() { + It("should support filtering by labels in a single namespace", func(ctx SpecContext) { By("listing pods with a particular label") // NB: each pod has a "test-label": out := unstructured.UnstructuredList{} @@ -310,7 +942,7 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Version: "v1", Kind: "PodList", }) - err := informerCache.List(context.Background(), &out, + err := informerCache.List(ctx, &out, client.InNamespace(testNamespaceTwo), client.MatchingLabels(map[string]string{"test-label": "test-pod-2"})) Expect(err).To(Succeed()) @@ -322,10 +954,10 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Expect(actual.GetLabels()["test-label"]).To(Equal("test-pod-2")) }) - It("should support filtering by labels from multiple 
namespaces", func() { + It("should support filtering by labels from multiple namespaces", func(ctx SpecContext) { By("creating another pod with the same label but different namespace") - anotherPod := createPod("test-pod-2", testNamespaceOne, kcorev1.RestartPolicyAlways) - defer deletePod(anotherPod) + anotherPod := createPod(ctx, "test-pod-2", testNamespaceOne, corev1.RestartPolicyAlways) + defer deletePod(ctx, anotherPod) By("listing pods with a particular label") // NB: each pod has a "test-label": @@ -336,7 +968,7 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Kind: "PodList", }) labels := map[string]string{"test-label": "test-pod-2"} - err := informerCache.List(context.Background(), &out, client.MatchingLabels(labels)) + err := informerCache.List(ctx, &out, client.MatchingLabels(labels)) Expect(err).To(Succeed()) By("verifying multiple pods with the same label in different namespaces are returned") @@ -345,10 +977,9 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca for _, actual := range out.Items { Expect(actual.GetLabels()["test-label"]).To(Equal("test-pod-2")) } - }) - It("should be able to list objects by namespace", func() { + It("should be able to list objects by namespace", func(ctx SpecContext) { By("listing pods in test-namespace-1") listObj := &unstructured.UnstructuredList{} listObj.SetGroupVersionKind(schema.GroupVersionKind{ @@ -356,81 +987,165 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Version: "v1", Kind: "PodList", }) - err := informerCache.List(context.Background(), listObj, client.InNamespace(testNamespaceOne)) + err := informerCache.List(ctx, listObj, client.InNamespace(testNamespaceOne)) Expect(err).To(Succeed()) By("verifying that the returned pods are in test-namespace-1") Expect(listObj.Items).NotTo(BeEmpty()) - Expect(listObj.Items).Should(HaveLen(1)) - actual := listObj.Items[0] - 
Expect(actual.GetNamespace()).To(Equal(testNamespaceOne)) + Expect(listObj.Items).Should(HaveLen(2)) + for _, item := range listObj.Items { + Expect(item.GetNamespace()).To(Equal(testNamespaceOne)) + } }) - It("should be able to restrict cache to a namespace", func() { - By("creating a namespaced cache") - namespacedCache, err := cache.New(cfg, cache.Options{Namespace: testNamespaceOne}) - Expect(err).NotTo(HaveOccurred()) - - By("running the cache and waiting for it to sync") - go func() { - defer GinkgoRecover() - Expect(namespacedCache.Start(stop)).To(Succeed()) - }() - Expect(namespacedCache.WaitForCacheSync(stop)).NotTo(BeFalse()) - - By("listing pods in all namespaces") - out := &unstructured.UnstructuredList{} - out.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Version: "v1", - Kind: "PodList", + cacheRestrictSubTests := []struct { + nameSuffix string + cacheOpts cache.Options + }{ + { + nameSuffix: "by using the per-gvk setting", + cacheOpts: cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: { + Namespaces: map[string]cache.Config{ + testNamespaceOne: {}, + }, + }, + }, + }, + }, + { + nameSuffix: "by using the global DefaultNamespaces setting", + cacheOpts: cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + testNamespaceOne: {}, + }, + }, + }, + } + + for _, tc := range cacheRestrictSubTests { + It("should be able to restrict cache to a namespace "+tc.nameSuffix, func(ctx SpecContext) { + By("creating a namespaced cache") + namespacedCache, err := cache.New(cfg, tc.cacheOpts) + Expect(err).NotTo(HaveOccurred()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(namespacedCache.Start(ctx)).To(Succeed()) + }() + Expect(namespacedCache.WaitForCacheSync(ctx)).To(BeTrue()) + + By("listing pods in all namespaces") + out := &unstructured.UnstructuredList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) 
+ for range 2 { + Expect(namespacedCache.List(ctx, out)).To(Succeed()) + + By("verifying the returned pod is from the watched namespace") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(2)) + for _, item := range out.Items { + Expect(item.GetNamespace()).To(Equal(testNamespaceOne)) + } + } + By("listing all nodes - should still be able to list a cluster-scoped resource") + nodeList := &unstructured.UnstructuredList{} + nodeList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "NodeList", + }) + Expect(namespacedCache.List(ctx, nodeList)).To(Succeed()) + + By("verifying the node list is not empty") + Expect(nodeList.Items).NotTo(BeEmpty()) + + By("getting a node - should still be able to get a cluster-scoped resource") + node := &unstructured.Unstructured{} + node.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Node", + }) + + By("verifying that getting the node works with an empty namespace") + key1 := client.ObjectKey{Namespace: "", Name: testNodeOne} + Expect(namespacedCache.Get(ctx, key1, node)).To(Succeed()) + + By("verifying that the namespace is ignored when getting a cluster-scoped resource") + key2 := client.ObjectKey{Namespace: "random", Name: testNodeOne} + Expect(namespacedCache.Get(ctx, key2, node)).To(Succeed()) }) - Expect(namespacedCache.List(context.Background(), out)).To(Succeed()) - - By("verifying the returned pod is from the watched namespace") - Expect(out.Items).NotTo(BeEmpty()) - Expect(out.Items).Should(HaveLen(1)) - Expect(out.Items[0].GetNamespace()).To(Equal(testNamespaceOne)) - - By("listing all namespaces - should still be able to get a cluster-scoped resource") - namespaceList := &unstructured.UnstructuredList{} - namespaceList.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Version: "v1", - Kind: "NamespaceList", + } + + if !isPodDisableDeepCopy(opts) { + It("should deep copy the object unless told otherwise", func(ctx 
SpecContext) { + By("retrieving a specific pod from the cache") + out := &unstructured.Unstructured{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + uKnownPod2 := &unstructured.Unstructured{} + Expect(kscheme.Scheme.Convert(knownPod2, uKnownPod2, nil)).To(Succeed()) + + podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo} + Expect(informerCache.Get(ctx, podKey, out)).To(Succeed()) + + By("verifying the retrieved pod is equal to a known pod") + Expect(out).To(Equal(uKnownPod2)) + + By("altering a field in the retrieved pod") + m, _ := out.Object["spec"].(map[string]interface{}) + m["activeDeadlineSeconds"] = 4 + + By("verifying the pods are no longer equal") + Expect(out).NotTo(Equal(knownPod2)) }) - Expect(namespacedCache.List(context.Background(), namespaceList)).To(Succeed()) - - By("verifying the namespace list is not empty") - Expect(namespaceList.Items).NotTo(BeEmpty()) - }) - - It("should deep copy the object unless told otherwise", func() { - By("retrieving a specific pod from the cache") - out := &unstructured.Unstructured{} - out.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Version: "v1", - Kind: "Pod", + } else { + It("should not deep copy the object if UnsafeDisableDeepCopy is enabled", func(ctx SpecContext) { + By("getting a specific pod from the cache twice") + podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo} + out1 := &unstructured.Unstructured{} + out1.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}) + Expect(informerCache.Get(ctx, podKey, out1)).To(Succeed()) + out2 := &unstructured.Unstructured{} + out2.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}) + Expect(informerCache.Get(ctx, podKey, out2)).To(Succeed()) + + By("verifying the pointer fields in pod have the same addresses") + Expect(out1).To(Equal(out2)) + 
Expect(reflect.ValueOf(out1.Object).Pointer()).To(BeIdenticalTo(reflect.ValueOf(out2.Object).Pointer())) + + By("listing pods from the cache twice") + outList1 := &unstructured.UnstructuredList{} + outList1.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PodList"}) + Expect(informerCache.List(ctx, outList1, client.InNamespace(testNamespaceOne))).To(Succeed()) + outList2 := &unstructured.UnstructuredList{} + outList2.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PodList"}) + Expect(informerCache.List(ctx, outList2, client.InNamespace(testNamespaceOne))).To(Succeed()) + + By("verifying the pointer fields in pod have the same addresses") + Expect(outList1.Items).To(HaveLen(len(outList2.Items))) + sort.SliceStable(outList1.Items, func(i, j int) bool { return outList1.Items[i].GetName() <= outList1.Items[j].GetName() }) + sort.SliceStable(outList2.Items, func(i, j int) bool { return outList2.Items[i].GetName() <= outList2.Items[j].GetName() }) + for i := range outList1.Items { + a := &outList1.Items[i] + b := &outList2.Items[i] + Expect(a).To(Equal(b)) + Expect(reflect.ValueOf(a.Object).Pointer()).To(BeIdenticalTo(reflect.ValueOf(b.Object).Pointer())) + } }) - uKnownPod2 := &unstructured.Unstructured{} - Expect(kscheme.Scheme.Convert(knownPod2, uKnownPod2, nil)).To(Succeed()) - - podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo} - Expect(informerCache.Get(context.Background(), podKey, out)).To(Succeed()) + } - By("verifying the retrieved pod is equal to a known pod") - Expect(out).To(Equal(uKnownPod2)) - - By("altering a field in the retrieved pod") - m, _ := out.Object["spec"].(map[string]interface{}) - m["activeDeadlineSeconds"] = 4 - - By("verifying the pods are no longer equal") - Expect(out).NotTo(Equal(knownPod2)) - }) - - It("should return an error if the object is not found", func() { + It("should return an error if the object is not found", func(ctx SpecContext) { By("getting 
a service that does not exists") svc := &unstructured.Unstructured{} svc.SetGroupVersionKind(schema.GroupVersionKind{ @@ -441,32 +1156,791 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca svcKey := client.ObjectKey{Namespace: testNamespaceOne, Name: "unknown"} By("verifying that an error is returned") - err := informerCache.Get(context.Background(), svcKey, svc) + err := informerCache.Get(ctx, svcKey, svc) Expect(err).To(HaveOccurred()) - Expect(errors.IsNotFound(err)).To(BeTrue()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) }) - It("should return an error if getting object in unwatched namespace", func() { + It("should return an error if getting object in unwatched namespace", func(ctx SpecContext) { By("getting a service that does not exists") - svc := &kcorev1.Service{} + svc := &corev1.Service{} svcKey := client.ObjectKey{Namespace: "unknown", Name: "unknown"} By("verifying that an error is returned") - err := informerCache.Get(context.Background(), svcKey, svc) + err := informerCache.Get(ctx, svcKey, svc) Expect(err).To(HaveOccurred()) }) - }) - }) - Describe("as an Informer", func() { - Context("with structured objects", func() { - It("should be able to get informer for the object", func(done Done) { + It("test multinamespaced cache for cluster scoped resources", func(ctx SpecContext) { + By("creating a multinamespaced cache to watch specific namespaces") + m, err := cache.New(cfg, cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + "default": {}, + testNamespaceOne: {}, + }, + }) + Expect(err).NotTo(HaveOccurred()) + + By("running the cache and waiting it for sync") + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).To(Succeed()) + }() + Expect(m.WaitForCacheSync(ctx)).To(BeTrue()) + + By("should be able to fetch cluster scoped resource") + node := &corev1.Node{} + + By("verifying that getting the node works with an empty namespace") + key1 := client.ObjectKey{Namespace: "", Name: testNodeOne} + 
Expect(m.Get(ctx, key1, node)).To(Succeed()) + + By("verifying if the cluster scoped resources are not duplicated") + nodeList := &unstructured.UnstructuredList{} + nodeList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "NodeList", + }) + Expect(m.List(ctx, nodeList)).To(Succeed()) + + By("verifying the node list is not empty") + Expect(nodeList.Items).NotTo(BeEmpty()) + Expect(len(nodeList.Items)).To(BeEquivalentTo(2)) + }) + + It("should return an error if pagination is used", func(ctx SpecContext) { + nodeList := &unstructured.UnstructuredList{} + nodeList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "NodeList", + }) + By("verifying that the first list works and returns a sentinel continue") + err := informerCache.List(ctx, nodeList) + Expect(err).ToNot(HaveOccurred()) + Expect(nodeList.GetContinue()).To(Equal("continue-not-supported")) + + By("verifying that an error is returned") + err = informerCache.List(ctx, nodeList, client.Continue(nodeList.GetContinue())) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("continue list option is not supported by the cache")) + }) + + It("should return an error if the continue list options is set", func(ctx SpecContext) { + podList := &unstructured.Unstructured{} + continueOpt := client.Continue("token") + By("verifying that an error is returned") + err := informerCache.List(ctx, podList, continueOpt) + Expect(err).To(HaveOccurred()) + }) + }) + Context("with metadata-only objects", func() { + It("should be able to list objects that haven't been watched previously", func(ctx SpecContext) { + By("listing all services in the cluster") + listObj := &metav1.PartialObjectMetadataList{} + listObj.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "ServiceList", + }) + err := informerCache.List(ctx, listObj) + Expect(err).To(Succeed()) + + By("verifying that the returned list contains the Kubernetes service") 
+ // NB: kubernetes default service is automatically created in testenv. + Expect(listObj.Items).NotTo(BeEmpty()) + hasKubeService := false + for i := range listObj.Items { + svc := &listObj.Items[i] + if isKubeService(svc) { + hasKubeService = true + break + } + } + Expect(hasKubeService).To(BeTrue()) + }) + It("should be able to get objects that haven't been watched previously", func(ctx SpecContext) { + By("getting the Kubernetes service") + svc := &metav1.PartialObjectMetadata{} + svc.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }) + svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} + Expect(informerCache.Get(ctx, svcKey, svc)).To(Succeed()) + + By("verifying that the returned service looks reasonable") + Expect(svc.GetName()).To(Equal("kubernetes")) + Expect(svc.GetNamespace()).To(Equal("default")) + }) + + It("should support filtering by labels in a single namespace", func(ctx SpecContext) { + By("listing pods with a particular label") + // NB: each pod has a "test-label": + out := metav1.PartialObjectMetadataList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err := informerCache.List(ctx, &out, + client.InNamespace(testNamespaceTwo), + client.MatchingLabels(map[string]string{"test-label": "test-pod-2"})) + Expect(err).To(Succeed()) + + By("verifying the returned pods have the correct label") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(1)) + actual := out.Items[0] + Expect(actual.GetLabels()["test-label"]).To(Equal("test-pod-2")) + }) + + It("should support filtering by labels from multiple namespaces", func(ctx SpecContext) { + By("creating another pod with the same label but different namespace") + anotherPod := createPod(ctx, "test-pod-2", testNamespaceOne, corev1.RestartPolicyAlways) + defer deletePod(ctx, anotherPod) + + By("listing pods with a particular label") + // NB: each pod has a 
"test-label": + out := metav1.PartialObjectMetadataList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + labels := map[string]string{"test-label": "test-pod-2"} + err := informerCache.List(ctx, &out, client.MatchingLabels(labels)) + Expect(err).To(Succeed()) + + By("verifying multiple pods with the same label in different namespaces are returned") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(2)) + for _, actual := range out.Items { + Expect(actual.GetLabels()["test-label"]).To(Equal("test-pod-2")) + } + }) + + It("should be able to list objects by namespace", func(ctx SpecContext) { + By("listing pods in test-namespace-1") + listObj := &metav1.PartialObjectMetadataList{} + listObj.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err := informerCache.List(ctx, listObj, client.InNamespace(testNamespaceOne)) + Expect(err).To(Succeed()) + + By("verifying that the returned pods are in test-namespace-1") + Expect(listObj.Items).NotTo(BeEmpty()) + Expect(listObj.Items).Should(HaveLen(2)) + for _, item := range listObj.Items { + Expect(item.Namespace).To(Equal(testNamespaceOne)) + } + }) + + It("should be able to restrict cache to a namespace", func(ctx SpecContext) { + By("creating a namespaced cache") + namespacedCache, err := cache.New(cfg, cache.Options{DefaultNamespaces: map[string]cache.Config{testNamespaceOne: {}}}) + Expect(err).NotTo(HaveOccurred()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(namespacedCache.Start(ctx)).To(Succeed()) + }() + Expect(namespacedCache.WaitForCacheSync(ctx)).To(BeTrue()) + + By("listing pods in all namespaces") + out := &metav1.PartialObjectMetadataList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + Expect(namespacedCache.List(ctx, out)).To(Succeed()) + + By("verifying the returned 
pod is from the watched namespace") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(2)) + for _, item := range out.Items { + Expect(item.Namespace).To(Equal(testNamespaceOne)) + } + By("listing all nodes - should still be able to list a cluster-scoped resource") + nodeList := &metav1.PartialObjectMetadataList{} + nodeList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "NodeList", + }) + Expect(namespacedCache.List(ctx, nodeList)).To(Succeed()) + + By("verifying the node list is not empty") + Expect(nodeList.Items).NotTo(BeEmpty()) + + By("getting a node - should still be able to get a cluster-scoped resource") + node := &metav1.PartialObjectMetadata{} + node.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Node", + }) + + By("verifying that getting the node works with an empty namespace") + key1 := client.ObjectKey{Namespace: "", Name: testNodeOne} + Expect(namespacedCache.Get(ctx, key1, node)).To(Succeed()) + + By("verifying that the namespace is ignored when getting a cluster-scoped resource") + key2 := client.ObjectKey{Namespace: "random", Name: testNodeOne} + Expect(namespacedCache.Get(ctx, key2, node)).To(Succeed()) + }) + + It("should be able to restrict cache to a namespace for namespaced object and to given selectors for non namespaced object", func(ctx SpecContext) { + By("creating a namespaced cache") + namespacedCache, err := cache.New(cfg, cache.Options{ + DefaultNamespaces: map[string]cache.Config{testNamespaceOne: {}}, + ByObject: map[client.Object]cache.ByObject{ + &corev1.Node{}: { + Label: labels.SelectorFromSet(labels.Set{"name": testNodeTwo}), + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(namespacedCache.Start(ctx)).To(Succeed()) + }() + Expect(namespacedCache.WaitForCacheSync(ctx)).To(BeTrue()) + + By("listing pods in all namespaces") + out := 
&metav1.PartialObjectMetadataList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + Expect(namespacedCache.List(ctx, out)).To(Succeed()) + + By("verifying the returned pod is from the watched namespace") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(2)) + for _, item := range out.Items { + Expect(item.Namespace).To(Equal(testNamespaceOne)) + } + By("listing all nodes - should still be able to list a cluster-scoped resource") + nodeList := &metav1.PartialObjectMetadataList{} + nodeList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "NodeList", + }) + Expect(namespacedCache.List(ctx, nodeList)).To(Succeed()) + + By("verifying the node list is not empty") + Expect(nodeList.Items).NotTo(BeEmpty()) + + By("getting a node - should still be able to get a cluster-scoped resource") + node := &metav1.PartialObjectMetadata{} + node.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Node", + }) + + By("verifying that getting the node works with an empty namespace") + key1 := client.ObjectKey{Namespace: "", Name: testNodeTwo} + Expect(namespacedCache.Get(ctx, key1, node)).To(Succeed()) + + By("verifying that the namespace is ignored when getting a cluster-scoped resource") + key2 := client.ObjectKey{Namespace: "random", Name: testNodeTwo} + Expect(namespacedCache.Get(ctx, key2, node)).To(Succeed()) + + By("verifying that an error is returned for node with not matching label") + key3 := client.ObjectKey{Namespace: "", Name: testNodeOne} + err = namespacedCache.Get(ctx, key3, node) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + if !isPodDisableDeepCopy(opts) { + It("should deep copy the object unless told otherwise", func(ctx SpecContext) { + By("retrieving a specific pod from the cache") + out := &metav1.PartialObjectMetadata{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + 
Group: "", + Version: "v1", + Kind: "Pod", + }) + uKnownPod2 := &metav1.PartialObjectMetadata{} + knownPod2.(*corev1.Pod).ObjectMeta.DeepCopyInto(&uKnownPod2.ObjectMeta) + uKnownPod2.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + + podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo} + Expect(informerCache.Get(ctx, podKey, out)).To(Succeed()) + + By("verifying the retrieved pod is equal to a known pod") + Expect(out).To(Equal(uKnownPod2)) + + By("altering a field in the retrieved pod") + out.Labels["foo"] = "bar" + + By("verifying the pods are no longer equal") + Expect(out).NotTo(Equal(knownPod2)) + }) + } else { + It("should not deep copy the object if UnsafeDisableDeepCopy is enabled", func(ctx SpecContext) { + By("getting a specific pod from the cache twice") + podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo} + out1 := &metav1.PartialObjectMetadata{} + out1.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}) + Expect(informerCache.Get(ctx, podKey, out1)).To(Succeed()) + out2 := &metav1.PartialObjectMetadata{} + out2.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}) + Expect(informerCache.Get(ctx, podKey, out2)).To(Succeed()) + + By("verifying the pods have the same pointer addresses") + By("verifying the pointer fields in pod have the same addresses") + Expect(out1).To(Equal(out2)) + Expect(reflect.ValueOf(out1.Labels).Pointer()).To(BeIdenticalTo(reflect.ValueOf(out2.Labels).Pointer())) + + By("listing pods from the cache twice") + outList1 := &metav1.PartialObjectMetadataList{} + outList1.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PodList"}) + Expect(informerCache.List(ctx, outList1, client.InNamespace(testNamespaceOne))).To(Succeed()) + outList2 := &metav1.PartialObjectMetadataList{} + outList2.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", 
Kind: "PodList"}) + Expect(informerCache.List(ctx, outList2, client.InNamespace(testNamespaceOne))).To(Succeed()) + + By("verifying the pointer fields in pod have the same addresses") + Expect(outList1.Items).To(HaveLen(len(outList2.Items))) + sort.SliceStable(outList1.Items, func(i, j int) bool { return outList1.Items[i].Name <= outList1.Items[j].Name }) + sort.SliceStable(outList2.Items, func(i, j int) bool { return outList2.Items[i].Name <= outList2.Items[j].Name }) + for i := range outList1.Items { + a := &outList1.Items[i] + b := &outList2.Items[i] + Expect(a).To(Equal(b)) + Expect(reflect.ValueOf(a.Labels).Pointer()).To(BeIdenticalTo(reflect.ValueOf(b.Labels).Pointer())) + } + }) + } + + It("should return an error if the object is not found", func(ctx SpecContext) { + By("getting a service that does not exists") + svc := &metav1.PartialObjectMetadata{} + svc.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }) + svcKey := client.ObjectKey{Namespace: testNamespaceOne, Name: "unknown"} + + By("verifying that an error is returned") + err := informerCache.Get(ctx, svcKey, svc) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + It("should return an error if getting object in unwatched namespace", func(ctx SpecContext) { + By("getting a service that does not exists") + svc := &corev1.Service{} + svcKey := client.ObjectKey{Namespace: "unknown", Name: "unknown"} + + By("verifying that an error is returned") + err := informerCache.Get(ctx, svcKey, svc) + Expect(err).To(HaveOccurred()) + }) + + It("should return an error if pagination is used", func(ctx SpecContext) { + nodeList := &metav1.PartialObjectMetadataList{} + nodeList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "NodeList", + }) + By("verifying that the first list works and returns a sentinel continue") + err := informerCache.List(ctx, nodeList) + Expect(err).ToNot(HaveOccurred()) + 
Expect(nodeList.GetContinue()).To(Equal("continue-not-supported")) + + By("verifying that an error is returned") + err = informerCache.List(ctx, nodeList, client.Continue(nodeList.GetContinue())) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("continue list option is not supported by the cache")) + }) + }) + type selectorsTestCase struct { + options cache.Options + expectedPods []string + } + DescribeTable(" and cache with selectors", func(ctx SpecContext, tc selectorsTestCase) { + By("creating the cache") + informer, err := cache.New(cfg, tc.options) + Expect(err).NotTo(HaveOccurred()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(informer.Start(ctx)).To(Succeed()) + }() + Expect(informer.WaitForCacheSync(ctx)).To(BeTrue()) + + By("Checking with structured") + obtainedStructuredPodList := corev1.PodList{} + Expect(informer.List(ctx, &obtainedStructuredPodList)).To(Succeed()) + Expect(obtainedStructuredPodList.Items).Should(WithTransform(func(pods []corev1.Pod) []string { + obtainedPodNames := []string{} + for _, pod := range pods { + obtainedPodNames = append(obtainedPodNames, pod.Name) + } + return obtainedPodNames + }, ConsistOf(tc.expectedPods))) + for _, pod := range obtainedStructuredPodList.Items { + Expect(informer.Get(ctx, client.ObjectKeyFromObject(&pod), &pod)).To(Succeed()) + } + + By("Checking with unstructured") + obtainedUnstructuredPodList := unstructured.UnstructuredList{} + obtainedUnstructuredPodList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err = informer.List(ctx, &obtainedUnstructuredPodList) + Expect(err).To(Succeed()) + Expect(obtainedUnstructuredPodList.Items).Should(WithTransform(func(pods []unstructured.Unstructured) []string { + obtainedPodNames := []string{} + for _, pod := range pods { + obtainedPodNames = append(obtainedPodNames, pod.GetName()) + } + return obtainedPodNames + }, 
ConsistOf(tc.expectedPods))) + for _, pod := range obtainedUnstructuredPodList.Items { + Expect(informer.Get(ctx, client.ObjectKeyFromObject(&pod), &pod)).To(Succeed()) + } + + By("Checking with metadata") + obtainedMetadataPodList := metav1.PartialObjectMetadataList{} + obtainedMetadataPodList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err = informer.List(ctx, &obtainedMetadataPodList) + Expect(err).To(Succeed()) + Expect(obtainedMetadataPodList.Items).Should(WithTransform(func(pods []metav1.PartialObjectMetadata) []string { + obtainedPodNames := []string{} + for _, pod := range pods { + obtainedPodNames = append(obtainedPodNames, pod.Name) + } + return obtainedPodNames + }, ConsistOf(tc.expectedPods))) + for _, pod := range obtainedMetadataPodList.Items { + Expect(informer.Get(ctx, client.ObjectKeyFromObject(&pod), &pod)).To(Succeed()) + } + }, + Entry("when selectors are empty it has to inform about all the pods", selectorsTestCase{ + expectedPods: []string{"test-pod-1", "test-pod-2", "test-pod-3", "test-pod-4", "test-pod-5", "test-pod-6"}, + }), + Entry("type-level field selector matches one pod", selectorsTestCase{ + options: cache.Options{ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: {Field: fields.SelectorFromSet(map[string]string{ + "metadata.name": "test-pod-2", + })}, + }}, + expectedPods: []string{"test-pod-2"}, + }), + Entry("global field selector matches one pod", selectorsTestCase{ + options: cache.Options{ + DefaultFieldSelector: fields.SelectorFromSet(map[string]string{ + "metadata.name": "test-pod-2", + }), + }, + expectedPods: []string{"test-pod-2"}, + }), + Entry("type-level field selectors matches multiple pods", selectorsTestCase{ + options: cache.Options{ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: {Field: fields.SelectorFromSet(map[string]string{ + "metadata.namespace": testNamespaceTwo, + })}, + }}, + expectedPods: []string{"test-pod-2", "test-pod-3", 
"test-pod-6"}, + }), + Entry("global field selectors matches multiple pods", selectorsTestCase{ + options: cache.Options{ + DefaultFieldSelector: fields.SelectorFromSet(map[string]string{ + "metadata.namespace": testNamespaceTwo, + }), + }, + expectedPods: []string{"test-pod-2", "test-pod-3", "test-pod-6"}, + }), + Entry("type-level label selector matches one pod", selectorsTestCase{ + options: cache.Options{ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: {Label: labels.SelectorFromSet(map[string]string{ + "test-label": "test-pod-4", + })}, + }}, + expectedPods: []string{"test-pod-4"}, + }), + Entry("namespaces configured, type-level label selector matches everything, overrides global selector", selectorsTestCase{ + options: cache.Options{ + DefaultNamespaces: map[string]cache.Config{testNamespaceOne: {}}, + ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: {Label: labels.Everything()}, + }, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"does-not": "match-anything"}), + }, + expectedPods: []string{"test-pod-1", "test-pod-5"}, + }), + Entry("namespaces configured, global selector is used", selectorsTestCase{ + options: cache.Options{ + DefaultNamespaces: map[string]cache.Config{testNamespaceTwo: {}}, + ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: {}, + }, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"common-label": "common"}), + }, + expectedPods: []string{"test-pod-3"}, + }), + Entry("global label selector matches one pod", selectorsTestCase{ + options: cache.Options{ + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{ + "test-label": "test-pod-4", + }), + }, + expectedPods: []string{"test-pod-4"}, + }), + Entry("type-level label selector matches multiple pods", selectorsTestCase{ + options: cache.Options{ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: {Label: labels.SelectorFromSet(map[string]string{ + "common-label": "common", + })}, + }}, + expectedPods: 
[]string{"test-pod-3", "test-pod-4"}, + }), + Entry("global label selector matches multiple pods", selectorsTestCase{ + options: cache.Options{ + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{ + "common-label": "common", + }), + }, + expectedPods: []string{"test-pod-3", "test-pod-4"}, + }), + Entry("type-level label and field selector, matches one pod", selectorsTestCase{ + options: cache.Options{ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: { + Label: labels.SelectorFromSet(map[string]string{"common-label": "common"}), + Field: fields.SelectorFromSet(map[string]string{"metadata.namespace": testNamespaceTwo}), + }, + }}, + expectedPods: []string{"test-pod-3"}, + }), + Entry("global label and field selector, matches one pod", selectorsTestCase{ + options: cache.Options{ + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"common-label": "common"}), + DefaultFieldSelector: fields.SelectorFromSet(map[string]string{"metadata.namespace": testNamespaceTwo}), + }, + expectedPods: []string{"test-pod-3"}, + }), + Entry("type-level label selector does not match, no results", selectorsTestCase{ + options: cache.Options{ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: {Label: labels.SelectorFromSet(map[string]string{ + "new-label": "new", + })}, + }}, + expectedPods: []string{}, + }), + Entry("global label selector does not match, no results", selectorsTestCase{ + options: cache.Options{ + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{ + "new-label": "new", + }), + }, + expectedPods: []string{}, + }), + Entry("type-level field selector does not match, no results", selectorsTestCase{ + options: cache.Options{ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: {Field: fields.SelectorFromSet(map[string]string{ + "metadata.namespace": "new", + })}, + }}, + expectedPods: []string{}, + }), + Entry("global field selector does not match, no results", selectorsTestCase{ + options: cache.Options{ + 
DefaultFieldSelector: fields.SelectorFromSet(map[string]string{ + "metadata.namespace": "new", + }), + }, + expectedPods: []string{}, + }), + Entry("type-level field selector on namespace matches one pod", selectorsTestCase{ + options: cache.Options{ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: {Namespaces: map[string]cache.Config{ + testNamespaceTwo: { + FieldSelector: fields.SelectorFromSet(map[string]string{ + "metadata.name": "test-pod-2", + }), + }, + }}, + }}, + expectedPods: []string{"test-pod-2"}, + }), + Entry("type-level field selector on namespace doesn't match", selectorsTestCase{ + options: cache.Options{ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: {Namespaces: map[string]cache.Config{ + testNamespaceTwo: { + FieldSelector: fields.SelectorFromSet(map[string]string{ + "metadata.name": "test-pod-doesn-exist", + }), + }, + }}, + }}, + expectedPods: []string{}, + }), + Entry("global field selector on namespace matches one pod", selectorsTestCase{ + options: cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + testNamespaceTwo: { + FieldSelector: fields.SelectorFromSet(map[string]string{ + "metadata.name": "test-pod-2", + }), + }, + }, + }, + expectedPods: []string{"test-pod-2"}, + }), + Entry("global field selector on namespace doesn't match", selectorsTestCase{ + options: cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + testNamespaceTwo: { + FieldSelector: fields.SelectorFromSet(map[string]string{ + "metadata.name": "test-pod-doesn-exist", + }), + }, + }, + }, + expectedPods: []string{}, + }), + Entry("type-level label selector on namespace matches one pod", selectorsTestCase{ + options: cache.Options{ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: {Namespaces: map[string]cache.Config{ + testNamespaceTwo: { + LabelSelector: labels.SelectorFromSet(map[string]string{ + "test-label": "test-pod-2", + }), + }, + }}, + }}, + expectedPods: []string{"test-pod-2"}, + }), + Entry("type-level label 
selector on namespace doesn't match", selectorsTestCase{ + options: cache.Options{ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: {Namespaces: map[string]cache.Config{ + testNamespaceTwo: { + LabelSelector: labels.SelectorFromSet(map[string]string{ + "test-label": "test-pod-doesn-exist", + }), + }, + }}, + }}, + expectedPods: []string{}, + }), + Entry("global label selector on namespace matches one pod", selectorsTestCase{ + options: cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + testNamespaceTwo: { + LabelSelector: labels.SelectorFromSet(map[string]string{ + "test-label": "test-pod-2", + }), + }, + }, + }, + expectedPods: []string{"test-pod-2"}, + }), + Entry("global label selector on namespace doesn't match", selectorsTestCase{ + options: cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + testNamespaceTwo: { + LabelSelector: labels.SelectorFromSet(map[string]string{ + "test-label": "test-pod-doesn-exist", + }), + }, + }, + }, + expectedPods: []string{}, + }), + Entry("Only NamespaceAll in DefaultNamespaces returns all pods", selectorsTestCase{ + options: cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + metav1.NamespaceAll: {}, + }, + }, + expectedPods: []string{"test-pod-1", "test-pod-2", "test-pod-3", "test-pod-4", "test-pod-5", "test-pod-6"}, + }), + Entry("Only NamespaceAll in ByObject.Namespaces returns all pods", selectorsTestCase{ + options: cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: { + Namespaces: map[string]cache.Config{ + metav1.NamespaceAll: {}, + }, + }, + }, + }, + expectedPods: []string{"test-pod-1", "test-pod-2", "test-pod-3", "test-pod-4", "test-pod-5", "test-pod-6"}, + }), + Entry("NamespaceAll in DefaultNamespaces creates a cache for all Namespaces that are not in DefaultNamespaces", selectorsTestCase{ + options: cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + metav1.NamespaceAll: {}, + testNamespaceOne: { + // labels.Nothing when serialized 
matches everything, so we have to construct our own "match nothing" selector + LabelSelector: labels.SelectorFromSet(labels.Set{"no-present": "not-present"})}, + }, + }, + // All pods that are not in NamespaceOne + expectedPods: []string{"test-pod-2", "test-pod-3", "test-pod-4", "test-pod-6"}, + }), + Entry("NamespaceAll in ByObject.Namespaces creates a cache for all Namespaces that are not in ByObject.Namespaces", selectorsTestCase{ + options: cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + &corev1.Pod{}: { + Namespaces: map[string]cache.Config{ + metav1.NamespaceAll: {}, + testNamespaceOne: { + // labels.Nothing when serialized matches everything, so we have to construct our own "match nothing" selector + LabelSelector: labels.SelectorFromSet(labels.Set{"no-present": "not-present"})}, + }, + }, + }, + }, + // All pods that are not in NamespaceOne + expectedPods: []string{"test-pod-2", "test-pod-3", "test-pod-4", "test-pod-6"}, + }), + ) + }) + Describe("as an Informer", func() { + It("should error when starting the cache a second time", func(ctx SpecContext) { + err := informerCache.Start(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("informer already started")) + }) + + Context("with structured objects", func() { + It("should be able to get informer for the object", func(ctx SpecContext) { By("getting a shared index informer for a pod") - pod := &kcorev1.Pod{ - ObjectMeta: kmetav1.ObjectMeta{ + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ Name: "informer-obj", Namespace: "default", }, - Spec: kcorev1.PodSpec{ - Containers: []kcorev1.Container{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { Name: "nginx", Image: "nginx", @@ -474,7 +1948,7 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca }, }, } - sii, err := informerCache.GetInformer(pod) + sii, err := informerCache.GetInformer(ctx, pod) Expect(err).NotTo(HaveOccurred()) Expect(sii).NotTo(BeNil()) 
Expect(sii.HasSynced()).To(BeTrue()) @@ -484,23 +1958,57 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca addFunc := func(obj interface{}) { out <- obj } - sii.AddEventHandler(kcache.ResourceEventHandlerFuncs{AddFunc: addFunc}) + _, _ = sii.AddEventHandler(kcache.ResourceEventHandlerFuncs{AddFunc: addFunc}) By("adding an object") cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) - Expect(cl.Create(context.Background(), pod)).To(Succeed()) - defer deletePod(pod) + Expect(cl.Create(ctx, pod)).To(Succeed()) + defer deletePod(ctx, pod) By("verifying the object is received on the channel") Eventually(out).Should(Receive(Equal(pod))) - close(done) }) - // TODO: Add a test for when GVK is not in Scheme. Does code support informer for unstructured object? - It("should be able to get an informer by group/version/kind", func(done Done) { + It("should be able to stop and restart informers", func(ctx SpecContext) { + By("getting a shared index informer for a pod") + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "informer-obj", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + } + sii, err := informerCache.GetInformer(ctx, pod) + Expect(err).NotTo(HaveOccurred()) + Expect(sii).NotTo(BeNil()) + Expect(sii.HasSynced()).To(BeTrue()) + + By("removing the existing informer") + Expect(informerCache.RemoveInformer(ctx, pod)).To(Succeed()) + Eventually(sii.IsStopped).WithTimeout(5 * time.Second).Should(BeTrue()) + + By("recreating the informer") + + sii2, err := informerCache.GetInformer(ctx, pod) + Expect(err).NotTo(HaveOccurred()) + Expect(sii2).NotTo(BeNil()) + Expect(sii2.HasSynced()).To(BeTrue()) + + By("validating the two informers are in different states") + Expect(sii.IsStopped()).To(BeTrue()) + Expect(sii2.IsStopped()).To(BeFalse()) + }) + It("should be able to get an informer by group/version/kind", 
func(ctx SpecContext) { By("getting an shared index informer for gvk = core/v1/pod") gvk := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} - sii, err := informerCache.GetInformerForKind(gvk) + sii, err := informerCache.GetInformerForKind(ctx, gvk) Expect(err).NotTo(HaveOccurred()) Expect(sii).NotTo(BeNil()) Expect(sii.HasSynced()).To(BeTrue()) @@ -510,18 +2018,18 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca addFunc := func(obj interface{}) { out <- obj } - sii.AddEventHandler(kcache.ResourceEventHandlerFuncs{AddFunc: addFunc}) + _, _ = sii.AddEventHandler(kcache.ResourceEventHandlerFuncs{AddFunc: addFunc}) By("adding an object") cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) - pod := &kcorev1.Pod{ - ObjectMeta: kmetav1.ObjectMeta{ + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ Name: "informer-gvk", Namespace: "default", }, - Spec: kcorev1.PodSpec{ - Containers: []kcorev1.Container{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { Name: "nginx", Image: "nginx", @@ -529,54 +2037,169 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca }, }, } - Expect(cl.Create(context.Background(), pod)).To(Succeed()) - defer deletePod(pod) + Expect(cl.Create(ctx, pod)).To(Succeed()) + defer deletePod(ctx, pod) By("verifying the object is received on the channel") Eventually(out).Should(Receive(Equal(pod))) - close(done) }) - - It("should be able to index an object field then retrieve objects by that field", func() { + It("should be able to index an object field then retrieve objects by that field", func(ctx SpecContext) { By("creating the cache") informer, err := cache.New(cfg, cache.Options{}) Expect(err).NotTo(HaveOccurred()) By("indexing the restartPolicy field of the Pod object before starting") - pod := &kcorev1.Pod{} - indexFunc := func(obj runtime.Object) []string { - return []string{string(obj.(*kcorev1.Pod).Spec.RestartPolicy)} + pod := 
&corev1.Pod{} + indexFunc := func(obj client.Object) []string { + return []string{string(obj.(*corev1.Pod).Spec.RestartPolicy)} } - Expect(informer.IndexField(pod, "spec.restartPolicy", indexFunc)).To(Succeed()) + Expect(informer.IndexField(ctx, pod, "spec.restartPolicy", indexFunc)).To(Succeed()) By("running the cache and waiting for it to sync") go func() { defer GinkgoRecover() - Expect(informer.Start(stop)).To(Succeed()) + Expect(informer.Start(ctx)).To(Succeed()) }() - Expect(informer.WaitForCacheSync(stop)).NotTo(BeFalse()) + Expect(informer.WaitForCacheSync(ctx)).To(BeTrue()) By("listing Pods with restartPolicyOnFailure") - listObj := &kcorev1.PodList{} - Expect(informer.List(context.Background(), listObj, - client.MatchingField("spec.restartPolicy", "OnFailure"))).To(Succeed()) - + listObj := &corev1.PodList{} + Expect(informer.List(ctx, listObj, + client.MatchingFields{"spec.restartPolicy": "OnFailure"})).To(Succeed()) By("verifying that the returned pods have correct restart policy") Expect(listObj.Items).NotTo(BeEmpty()) Expect(listObj.Items).Should(HaveLen(1)) actual := listObj.Items[0] Expect(actual.Name).To(Equal("test-pod-3")) }) + + It("should allow for get informer to be cancelled", func(specCtx SpecContext) { + By("creating a context and cancelling it") + ctx, cancel := context.WithCancel(specCtx) + cancel() + + By("getting a shared index informer for a pod with a cancelled context") + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "informer-obj", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + } + sii, err := informerCache.GetInformer(ctx, pod) + Expect(err).To(HaveOccurred()) + Expect(sii).To(BeNil()) + Expect(apierrors.IsTimeout(err)).To(BeTrue()) + }) + + It("should allow getting an informer by group/version/kind to be cancelled", func(specCtx SpecContext) { + By("creating a context and cancelling it") + ctx, cancel := 
context.WithCancel(specCtx) + cancel() + + By("getting an shared index informer for gvk = core/v1/pod with a cancelled context") + gvk := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} + sii, err := informerCache.GetInformerForKind(ctx, gvk) + Expect(err).To(HaveOccurred()) + Expect(sii).To(BeNil()) + Expect(apierrors.IsTimeout(err)).To(BeTrue()) + }) + + It("should be able not to change indexer values after indexing cluster-scope objects", func(ctx SpecContext) { + By("creating the cache") + informer, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("indexing the Namespace objects with fixed values before starting") + ns := &corev1.Namespace{} + indexerValues := []string{"a", "b", "c"} + fieldName := "fixedValues" + indexFunc := func(obj client.Object) []string { + return indexerValues + } + Expect(informer.IndexField(ctx, ns, fieldName, indexFunc)).To(Succeed()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(informer.Start(ctx)).To(Succeed()) + }() + Expect(informer.WaitForCacheSync(ctx)).To(BeTrue()) + + By("listing Namespaces with fixed indexer") + listObj := &corev1.NamespaceList{} + Expect(informer.List(ctx, listObj, + client.MatchingFields{fieldName: "a"})).To(Succeed()) + Expect(listObj.Items).NotTo(BeZero()) + + By("verifying the indexing does not change fixed returned values") + Expect(indexerValues).Should(HaveLen(3)) + Expect(indexerValues[0]).To(Equal("a")) + Expect(indexerValues[1]).To(Equal("b")) + Expect(indexerValues[2]).To(Equal("c")) + }) + + It("should be able to matching fields with multiple indexes", func(ctx SpecContext) { + By("creating the cache") + informer, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + + pod := &corev1.Pod{} + By("indexing pods with label before starting") + fieldName1 := "indexByLabel" + indexFunc1 := func(obj client.Object) []string { + return 
[]string{obj.(*corev1.Pod).Labels["common-label"]} + } + Expect(informer.IndexField(ctx, pod, fieldName1, indexFunc1)).To(Succeed()) + By("indexing pods with restart policy before starting") + fieldName2 := "indexByPolicy" + indexFunc2 := func(obj client.Object) []string { + return []string{string(obj.(*corev1.Pod).Spec.RestartPolicy)} + } + Expect(informer.IndexField(ctx, pod, fieldName2, indexFunc2)).To(Succeed()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(informer.Start(ctx)).To(Succeed()) + }() + Expect(informer.WaitForCacheSync(ctx)).To(BeTrue()) + + By("listing pods with label index") + listObj := &corev1.PodList{} + Expect(informer.List(ctx, listObj, + client.MatchingFields{fieldName1: "common"})).To(Succeed()) + Expect(listObj.Items).To(HaveLen(2)) + + By("listing pods with restart policy index") + listObj = &corev1.PodList{} + Expect(informer.List(ctx, listObj, + client.MatchingFields{fieldName2: string(corev1.RestartPolicyNever)})).To(Succeed()) + Expect(listObj.Items).To(HaveLen(3)) + + By("listing pods with both fixed indexers 1 and 2") + listObj = &corev1.PodList{} + Expect(informer.List(ctx, listObj, + client.MatchingFields{fieldName1: "common", fieldName2: string(corev1.RestartPolicyNever)})).To(Succeed()) + Expect(listObj.Items).To(HaveLen(1)) + }) }) Context("with unstructured objects", func() { - It("should be able to get informer for the object", func(done Done) { + It("should be able to get informer for the object", func(ctx SpecContext) { By("getting a shared index informer for a pod") pod := &unstructured.Unstructured{ Object: map[string]interface{}{ "spec": map[string]interface{}{ "containers": []map[string]interface{}{ - map[string]interface{}{ + { "name": "nginx", "image": "nginx", }, @@ -591,7 +2214,7 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Version: "v1", Kind: "Pod", }) - sii, err := informerCache.GetInformer(pod) + sii, err := 
informerCache.GetInformer(ctx, pod) Expect(err).NotTo(HaveOccurred()) Expect(sii).NotTo(BeNil()) Expect(sii.HasSynced()).To(BeTrue()) @@ -601,20 +2224,60 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca addFunc := func(obj interface{}) { out <- obj } - sii.AddEventHandler(kcache.ResourceEventHandlerFuncs{AddFunc: addFunc}) + _, _ = sii.AddEventHandler(kcache.ResourceEventHandlerFuncs{AddFunc: addFunc}) By("adding an object") cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) - Expect(cl.Create(context.Background(), pod)).To(Succeed()) - defer deletePod(pod) + Expect(cl.Create(ctx, pod)).To(Succeed()) + defer deletePod(ctx, pod) By("verifying the object is received on the channel") Eventually(out).Should(Receive(Equal(pod))) - close(done) - }, 3) + }) - It("should be able to index an object field then retrieve objects by that field", func() { + It("should be able to stop and restart informers", func(ctx SpecContext) { + By("getting a shared index informer for a pod") + pod := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "nginx", + "image": "nginx", + }, + }, + }, + }, + } + pod.SetName("informer-obj2") + pod.SetNamespace("default") + pod.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + sii, err := informerCache.GetInformer(ctx, pod) + Expect(err).NotTo(HaveOccurred()) + Expect(sii).NotTo(BeNil()) + Expect(sii.HasSynced()).To(BeTrue()) + + By("removing the existing informer") + Expect(informerCache.RemoveInformer(ctx, pod)).To(Succeed()) + Eventually(sii.IsStopped).WithTimeout(5 * time.Second).Should(BeTrue()) + + By("recreating the informer") + sii2, err := informerCache.GetInformer(ctx, pod) + Expect(err).NotTo(HaveOccurred()) + Expect(sii2).NotTo(BeNil()) + Expect(sii2.HasSynced()).To(BeTrue()) + + By("validating the two informers are in different 
states") + Expect(sii.IsStopped()).To(BeTrue()) + Expect(sii2.IsStopped()).To(BeFalse()) + }) + + It("should be able to index an object field then retrieve objects by that field", func(ctx SpecContext) { By("creating the cache") informer, err := cache.New(cfg, cache.Options{}) Expect(err).NotTo(HaveOccurred()) @@ -626,7 +2289,7 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Version: "v1", Kind: "Pod", }) - indexFunc := func(obj runtime.Object) []string { + indexFunc := func(obj client.Object) []string { s, ok := obj.(*unstructured.Unstructured).Object["spec"] if !ok { return []string{} @@ -637,14 +2300,14 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca } return []string{fmt.Sprintf("%v", m["restartPolicy"])} } - Expect(informer.IndexField(pod, "spec.restartPolicy", indexFunc)).To(Succeed()) + Expect(informer.IndexField(ctx, pod, "spec.restartPolicy", indexFunc)).To(Succeed()) By("running the cache and waiting for it to sync") go func() { defer GinkgoRecover() - Expect(informer.Start(stop)).To(Succeed()) + Expect(informer.Start(ctx)).To(Succeed()) }() - Expect(informer.WaitForCacheSync(stop)).NotTo(BeFalse()) + Expect(informer.WaitForCacheSync(ctx)).To(BeTrue()) By("listing Pods with restartPolicyOnFailure") listObj := &unstructured.UnstructuredList{} @@ -653,17 +2316,275 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Version: "v1", Kind: "PodList", }) - err = informer.List(context.Background(), listObj, - client.MatchingField("spec.restartPolicy", "OnFailure")) + err = informer.List(ctx, listObj, + client.MatchingFields{"spec.restartPolicy": "OnFailure"}) + Expect(err).To(Succeed()) + + By("verifying that the returned pods have correct restart policy") + Expect(listObj.Items).NotTo(BeEmpty()) + Expect(listObj.Items).Should(HaveLen(1)) + actual := listObj.Items[0] + Expect(actual.GetName()).To(Equal("test-pod-3")) + }) + + It("should allow for get informer 
to be cancelled", func(specCtx SpecContext) { + By("cancelling the context") + ctx := cancelledCtx(specCtx) + + By("getting a shared index informer for a pod with a cancelled context") + pod := &unstructured.Unstructured{} + pod.SetName("informer-obj2") + pod.SetNamespace("default") + pod.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + sii, err := informerCache.GetInformer(ctx, pod) + Expect(err).To(HaveOccurred()) + Expect(sii).To(BeNil()) + Expect(apierrors.IsTimeout(err)).To(BeTrue()) + }) + }) + Context("with metadata-only objects", func() { + It("should be able to get informer for the object", func(ctx SpecContext) { + By("getting a shared index informer for a pod") + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "informer-obj", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + } + + podMeta := &metav1.PartialObjectMetadata{} + pod.ObjectMeta.DeepCopyInto(&podMeta.ObjectMeta) + podMeta.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + + sii, err := informerCache.GetInformer(ctx, podMeta) + Expect(err).NotTo(HaveOccurred()) + Expect(sii).NotTo(BeNil()) + Expect(sii.HasSynced()).To(BeTrue()) + + By("adding an event handler listening for object creation which sends the object to a channel") + out := make(chan interface{}) + addFunc := func(obj interface{}) { + out <- obj + } + _, _ = sii.AddEventHandler(kcache.ResourceEventHandlerFuncs{AddFunc: addFunc}) + + By("adding an object") + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl.Create(ctx, pod)).To(Succeed()) + defer deletePod(ctx, pod) + // re-copy the result in so that we can match on it properly + pod.ObjectMeta.DeepCopyInto(&podMeta.ObjectMeta) + + By("verifying the object's metadata is received on the channel") + Eventually(out).Should(Receive(Equal(podMeta))) 
+ }) + + It("should be able to index an object field then retrieve objects by that field", func(ctx SpecContext) { + By("creating the cache") + informer, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("indexing the restartPolicy field of the Pod object before starting") + pod := &metav1.PartialObjectMetadata{} + pod.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + indexFunc := func(obj client.Object) []string { + metadata := obj.(*metav1.PartialObjectMetadata) + return []string{metadata.Labels["test-label"]} + } + Expect(informer.IndexField(ctx, pod, "metadata.labels.test-label", indexFunc)).To(Succeed()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(informer.Start(ctx)).To(Succeed()) + }() + Expect(informer.WaitForCacheSync(ctx)).To(BeTrue()) + + By("listing Pods with restartPolicyOnFailure") + listObj := &metav1.PartialObjectMetadataList{} + gvk := schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + } + listObj.SetGroupVersionKind(gvk) + err = informer.List(ctx, listObj, + client.MatchingFields{"metadata.labels.test-label": "test-pod-3"}) Expect(err).To(Succeed()) + By("verifying that the GVK has been preserved for the list object") + Expect(listObj.GroupVersionKind()).To(Equal(gvk)) + By("verifying that the returned pods have correct restart policy") Expect(listObj.Items).NotTo(BeEmpty()) Expect(listObj.Items).Should(HaveLen(1)) actual := listObj.Items[0] Expect(actual.GetName()).To(Equal("test-pod-3")) - }, 3) + + By("verifying that the GVK has been preserved for the item in the list") + Expect(actual.GroupVersionKind()).To(Equal(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + })) + }) + + It("should allow for get informer to be cancelled", func(specContext SpecContext) { + By("creating a context and cancelling it") + ctx := cancelledCtx(specContext) + + By("getting a shared 
index informer for a pod with a cancelled context") + pod := &metav1.PartialObjectMetadata{} + pod.SetName("informer-obj2") + pod.SetNamespace("default") + pod.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + sii, err := informerCache.GetInformer(ctx, pod) + Expect(err).To(HaveOccurred()) + Expect(sii).To(BeNil()) + Expect(apierrors.IsTimeout(err)).To(BeTrue()) + }) + }) + }) + Context("using UnsafeDisableDeepCopy", func() { + Describe("with ListOptions", func() { + It("should be able to change object in informer cache", func(ctx SpecContext) { + By("listing pods") + out := corev1.PodList{} + Expect(informerCache.List(ctx, &out, client.UnsafeDisableDeepCopy)).To(Succeed()) + for _, item := range out.Items { + if strings.Compare(item.Name, "test-pod-3") == 0 { // test-pod-3 has labels + item.Labels["UnsafeDisableDeepCopy"] = "true" + break + } + } + + By("verifying that the returned pods were changed") + out2 := corev1.PodList{} + Expect(informerCache.List(ctx, &out, client.UnsafeDisableDeepCopy)).To(Succeed()) + for _, item := range out2.Items { + if strings.Compare(item.Name, "test-pod-3") == 0 { + Expect(item.Labels["UnsafeDisableDeepCopy"]).To(Equal("true")) + break + } + } + }) + }) + Describe("with GetOptions", func() { + It("should be able to change object in informer cache", func(ctx SpecContext) { + out := corev1.Pod{} + podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo} + Expect(informerCache.Get(ctx, podKey, &out, client.UnsafeDisableDeepCopy)).To(Succeed()) + + out.Labels["UnsafeDisableDeepCopy"] = "true" + + By("verifying that the returned pod was changed") + out2 := corev1.Pod{} + Expect(informerCache.Get(ctx, podKey, &out2, client.UnsafeDisableDeepCopy)).To(Succeed()) + Expect(out2.Labels["UnsafeDisableDeepCopy"]).To(Equal("true")) + }) }) }) }) } + +var _ = Describe("TransformStripManagedFields", func() { + It("should strip managed fields from an object", func() { + obj := 
&corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + ManagedFields: []metav1.ManagedFieldsEntry{{ + Manager: "foo", + }}, + }} + transformed, err := cache.TransformStripManagedFields()(obj) + Expect(err).NotTo(HaveOccurred()) + Expect(transformed).To(Equal(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{}})) + }) + + It("should not trip over an unexpected object", func() { + transformed, err := cache.TransformStripManagedFields()("foo") + Expect(err).NotTo(HaveOccurred()) + Expect(transformed).To(Equal("foo")) + }) +}) + +// ensureNamespace installs namespace of a given name if not exists. +func ensureNamespace(ctx context.Context, namespace string, client client.Client) error { + ns := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Namespace", + APIVersion: "v1", + }, + } + err := client.Create(ctx, &ns) + if apierrors.IsAlreadyExists(err) { + return nil + } + return err +} + +func ensureNode(ctx context.Context, name string, client client.Client) error { + node := corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{"name": name}, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Node", + APIVersion: "v1", + }, + } + err := client.Create(ctx, &node) + if apierrors.IsAlreadyExists(err) { + return nil + } + return err +} + +func isKubeService(svc metav1.Object) bool { + // grumble grumble linters grumble grumble + return svc.GetNamespace() == "default" && svc.GetName() == "kubernetes" +} + +func isPodDisableDeepCopy(opts cache.Options) bool { + if opts.ByObject[&corev1.Pod{}].UnsafeDisableDeepCopy != nil { + return *opts.ByObject[&corev1.Pod{}].UnsafeDisableDeepCopy + } + if opts.DefaultUnsafeDisableDeepCopy != nil { + return *opts.DefaultUnsafeDisableDeepCopy + } + return false +} + +func cancelledCtx(ctx context.Context) context.Context { + cancelCtx, cancel := context.WithCancel(ctx) + cancel() + return cancelCtx +} diff --git a/pkg/cache/defaulting_test.go 
b/pkg/cache/defaulting_test.go new file mode 100644 index 0000000000..d9d0dcceb3 --- /dev/null +++ b/pkg/cache/defaulting_test.go @@ -0,0 +1,498 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "reflect" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + fuzz "github.com/google/gofuzz" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestDefaultOpts(t *testing.T) { + t.Parallel() + + pod := &corev1.Pod{} + + compare := func(a, b any) string { + return cmp.Diff(a, b, + cmpopts.IgnoreUnexported(Options{}), + cmpopts.IgnoreFields(Options{}, "HTTPClient", "Scheme", "Mapper", "SyncPeriod"), + cmp.Comparer(func(a, b fields.Selector) bool { + if (a != nil) != (b != nil) { + return false + } + if a == nil { + return true + } + return a.String() == b.String() + }), + ) + } + testCases := []struct { + name string + in Options + + verification func(Options) string + }{ + { + name: "ByObject.Namespaces gets defaulted from ByObject", + in: Options{ + ByObject: map[client.Object]ByObject{pod: { + Namespaces: map[string]Config{ + "default": {}, + }, + Label: 
labels.SelectorFromSet(map[string]string{"from": "by-object"}), + }}, + DefaultNamespaces: map[string]Config{ + "default": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-namespaces"})}, + }, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-label-selector"}), + }, + + verification: func(o Options) string { + expected := map[string]Config{ + "default": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "by-object"})}, + } + return cmp.Diff(expected, o.ByObject[pod].Namespaces) + }, + }, + { + name: "ByObject.Namespaces gets defaulted from DefaultNamespaces", + in: Options{ + ByObject: map[client.Object]ByObject{pod: { + Namespaces: map[string]Config{ + "default": {}, + }, + }}, + DefaultNamespaces: map[string]Config{ + "default": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-namespaces"})}, + }, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-label-selector"}), + }, + + verification: func(o Options) string { + expected := map[string]Config{ + "default": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-namespaces"})}, + } + return cmp.Diff(expected, o.ByObject[pod].Namespaces) + }, + }, + { + name: "ByObject.Namespaces gets defaulted from DefaultLabelSelector", + in: Options{ + ByObject: map[client.Object]ByObject{pod: { + Namespaces: map[string]Config{ + "default": {}, + }, + }}, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-label-selector"}), + }, + + verification: func(o Options) string { + expected := map[string]Config{ + "default": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-label-selector"})}, + } + return cmp.Diff(expected, o.ByObject[pod].Namespaces) + }, + }, + { + name: "ByObject.Namespaces gets defaulted from DefaultNamespaces", + in: Options{ + ByObject: map[client.Object]ByObject{pod: {}}, + DefaultNamespaces: map[string]Config{ + 
"default": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-namespaces"})}, + }, + }, + + verification: func(o Options) string { + expected := map[string]Config{ + "default": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-namespaces"})}, + } + return cmp.Diff(expected, o.ByObject[pod].Namespaces) + }, + }, + { + name: "ByObject.Namespaces doesn't get defaulted when its empty", + in: Options{ + ByObject: map[client.Object]ByObject{pod: {Namespaces: map[string]Config{}}}, + DefaultNamespaces: map[string]Config{ + "default": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-namespaces"})}, + }, + }, + + verification: func(o Options) string { + expected := map[string]Config{} + return cmp.Diff(expected, o.ByObject[pod].Namespaces) + }, + }, + { + name: "ByObject.Labels gets defaulted from DefautLabelSelector", + in: Options{ + ByObject: map[client.Object]ByObject{pod: {}}, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-label-selector"}), + }, + + verification: func(o Options) string { + expected := labels.SelectorFromSet(map[string]string{"from": "default-label-selector"}) + return cmp.Diff(expected, o.ByObject[pod].Label) + }, + }, + { + name: "ByObject.Labels doesn't get defaulted when set", + in: Options{ + ByObject: map[client.Object]ByObject{pod: {Label: labels.SelectorFromSet(map[string]string{"from": "by-object"})}}, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-label-selector"}), + }, + + verification: func(o Options) string { + expected := labels.SelectorFromSet(map[string]string{"from": "by-object"}) + return cmp.Diff(expected, o.ByObject[pod].Label) + }, + }, + { + name: "ByObject.Fields gets defaulted from DefaultFieldSelector", + in: Options{ + ByObject: map[client.Object]ByObject{pod: {}}, + DefaultFieldSelector: fields.SelectorFromSet(map[string]string{"from": "default-field-selector"}), + }, + + verification: 
func(o Options) string { + expected := fields.SelectorFromSet(map[string]string{"from": "default-field-selector"}) + return cmp.Diff(expected, o.ByObject[pod].Field, cmp.Exporter(func(reflect.Type) bool { return true })) + }, + }, + { + name: "ByObject.Fields doesn't get defaulted when set", + in: Options{ + ByObject: map[client.Object]ByObject{pod: {Field: fields.SelectorFromSet(map[string]string{"from": "by-object"})}}, + DefaultFieldSelector: fields.SelectorFromSet(map[string]string{"from": "default-field-selector"}), + }, + + verification: func(o Options) string { + expected := fields.SelectorFromSet(map[string]string{"from": "by-object"}) + return cmp.Diff(expected, o.ByObject[pod].Field, cmp.Exporter(func(reflect.Type) bool { return true })) + }, + }, + { + name: "ByObject.UnsafeDisableDeepCopy gets defaulted from DefaultUnsafeDisableDeepCopy", + in: Options{ + ByObject: map[client.Object]ByObject{pod: {}}, + DefaultUnsafeDisableDeepCopy: ptr.To(true), + }, + + verification: func(o Options) string { + expected := ptr.To(true) + return cmp.Diff(expected, o.ByObject[pod].UnsafeDisableDeepCopy) + }, + }, + { + name: "ByObject.UnsafeDisableDeepCopy doesn't get defaulted when set", + in: Options{ + ByObject: map[client.Object]ByObject{pod: {UnsafeDisableDeepCopy: ptr.To(false)}}, + DefaultUnsafeDisableDeepCopy: ptr.To(true), + }, + + verification: func(o Options) string { + expected := ptr.To(false) + return cmp.Diff(expected, o.ByObject[pod].UnsafeDisableDeepCopy) + }, + }, + { + name: "ByObject.EnableWatchBookmarks gets defaulted from DefaultEnableWatchBookmarks", + in: Options{ + ByObject: map[client.Object]ByObject{pod: {}}, + DefaultEnableWatchBookmarks: ptr.To(true), + }, + + verification: func(o Options) string { + expected := ptr.To(true) + return cmp.Diff(expected, o.ByObject[pod].EnableWatchBookmarks) + }, + }, + { + name: "ByObject.EnableWatchBookmarks doesn't get defaulted when set", + in: Options{ + ByObject: map[client.Object]ByObject{pod: 
{EnableWatchBookmarks: ptr.To(false)}}, + DefaultEnableWatchBookmarks: ptr.To(true), + }, + + verification: func(o Options) string { + expected := ptr.To(false) + return cmp.Diff(expected, o.ByObject[pod].EnableWatchBookmarks) + }, + }, + { + name: "DefaultNamespace label selector gets defaulted from DefaultLabelSelector", + in: Options{ + DefaultNamespaces: map[string]Config{"default": {}}, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-label-selector"}), + }, + + verification: func(o Options) string { + expected := map[string]Config{ + "default": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-label-selector"})}, + } + return cmp.Diff(expected, o.DefaultNamespaces) + }, + }, + { + name: "ByObject.Namespaces get selector from DefaultNamespaces before DefaultSelector", + in: Options{ + ByObject: map[client.Object]ByObject{ + pod: {Namespaces: map[string]Config{"default": {}}}, + }, + DefaultNamespaces: map[string]Config{"default": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "namespace"})}}, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"from": "default"}), + }, + + verification: func(o Options) string { + expected := Options{ + ByObject: map[client.Object]ByObject{ + pod: {Namespaces: map[string]Config{"default": { + LabelSelector: labels.SelectorFromSet(map[string]string{"from": "namespace"}), + }}}, + }, + DefaultNamespaces: map[string]Config{"default": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "namespace"})}}, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"from": "default"}), + } + + return compare(expected, o) + }, + }, + { + name: "Two namespaces in DefaultNamespaces with custom selection logic", + in: Options{DefaultNamespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: 
labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {}, + }}, + + verification: func(o Options) string { + expected := Options{ + DefaultNamespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {FieldSelector: fields.ParseSelectorOrDie("metadata.namespace!=kube-public,metadata.namespace!=kube-system")}, + }, + } + + return compare(expected, o) + }, + }, + { + name: "Two namespaces in DefaultNamespaces with custom selection logic and namespace default has its own field selector", + in: Options{DefaultNamespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {FieldSelector: fields.ParseSelectorOrDie("spec.nodeName=foo")}, + }}, + + verification: func(o Options) string { + expected := Options{ + DefaultNamespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {FieldSelector: fields.ParseSelectorOrDie( + "metadata.namespace!=kube-public,metadata.namespace!=kube-system,spec.nodeName=foo", + )}, + }, + } + + return compare(expected, o) + }, + }, + { + name: "Two namespaces in ByObject.Namespaces with custom selection logic", + in: Options{ByObject: map[client.Object]ByObject{pod: { + Namespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {}, + }, + }}}, + + verification: func(o Options) string { + expected := Options{ByObject: 
map[client.Object]ByObject{pod: { + Namespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {FieldSelector: fields.ParseSelectorOrDie( + "metadata.namespace!=kube-public,metadata.namespace!=kube-system", + )}, + }, + }}} + + return compare(expected, o) + }, + }, + { + name: "Two namespaces in ByObject.Namespaces with custom selection logic and namespace default has its own field selector", + in: Options{ByObject: map[client.Object]ByObject{pod: { + Namespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {FieldSelector: fields.ParseSelectorOrDie("spec.nodeName=foo")}, + }, + }}}, + + verification: func(o Options) string { + expected := Options{ByObject: map[client.Object]ByObject{pod: { + Namespaces: map[string]Config{ + "kube-public": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-public"})}, + "kube-system": {LabelSelector: labels.SelectorFromSet(map[string]string{"from": "kube-system"})}, + "": {FieldSelector: fields.ParseSelectorOrDie( + "metadata.namespace!=kube-public,metadata.namespace!=kube-system,spec.nodeName=foo", + )}, + }, + }}} + + return compare(expected, o) + }, + }, + { + name: "DefaultNamespace label selector doesn't get defaulted when set", + in: Options{ + DefaultNamespaces: map[string]Config{"default": {LabelSelector: labels.Everything()}}, + DefaultLabelSelector: labels.SelectorFromSet(map[string]string{"from": "default-label-selector"}), + }, + + verification: func(o Options) string { + expected := map[string]Config{ + "default": {LabelSelector: labels.Everything()}, + } + return cmp.Diff(expected, o.DefaultNamespaces) + }, + }, + { + name: "Defaulted namespaces in 
ByObject contain ByObject's selector", + in: Options{ + ByObject: map[client.Object]ByObject{ + pod: {Label: labels.SelectorFromSet(map[string]string{"from": "pod"})}, + }, + DefaultNamespaces: map[string]Config{"default": {}}, + }, + verification: func(o Options) string { + expected := Options{ + ByObject: map[client.Object]ByObject{ + pod: { + Label: labels.SelectorFromSet(map[string]string{"from": "pod"}), + Namespaces: map[string]Config{"default": { + LabelSelector: labels.SelectorFromSet(map[string]string{"from": "pod"}), + }}, + }, + }, + + DefaultNamespaces: map[string]Config{"default": {}}, + } + return compare(expected, o) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.in.Mapper = &fakeRESTMapper{} + + defaulted, err := defaultOpts(&rest.Config{}, tc.in) + if err != nil { + t.Fatal(err) + } + + if diff := tc.verification(defaulted); diff != "" { + t.Errorf("expected config differs from actual: %s", diff) + } + }) + } +} + +func TestDefaultOptsRace(t *testing.T) { + opts := Options{ + Mapper: &fakeRESTMapper{}, + ByObject: map[client.Object]ByObject{ + &corev1.Pod{}: { + Label: labels.SelectorFromSet(map[string]string{"from": "pod"}), + Namespaces: map[string]Config{"default": { + LabelSelector: labels.SelectorFromSet(map[string]string{"from": "pod"}), + }}, + }, + }, + DefaultNamespaces: map[string]Config{"default": {}}, + } + + // Start go routines which re-use the above options struct. + wg := sync.WaitGroup{} + for range 2 { + wg.Add(1) + go func() { + _, _ = defaultOpts(&rest.Config{}, opts) + wg.Done() + }() + } + + // Wait for the go routines to finish. 
+ wg.Wait() +} + +type fakeRESTMapper struct { + meta.RESTMapper +} + +func (f *fakeRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + return &meta.RESTMapping{Scope: meta.RESTScopeNamespace}, nil +} + +func TestDefaultConfigConsidersAllFields(t *testing.T) { + t.Parallel() + seed := time.Now().UnixNano() + t.Logf("Seed is %d", seed) + f := fuzz.NewWithSeed(seed).Funcs( + func(ls *labels.Selector, _ fuzz.Continue) { + *ls = labels.SelectorFromSet(map[string]string{"foo": "bar"}) + }, + func(fs *fields.Selector, _ fuzz.Continue) { + *fs = fields.SelectorFromSet(map[string]string{"foo": "bar"}) + }, + func(tf *cache.TransformFunc, _ fuzz.Continue) { + // never default this, as functions can not be compared so we fail down the line + }, + ) + + for i := 0; i < 100; i++ { + fuzzed := Config{} + f.Fuzz(&fuzzed) + + defaulted := defaultConfig(Config{}, fuzzed) + + if diff := cmp.Diff(fuzzed, defaulted, cmp.Exporter(func(reflect.Type) bool { return true })); diff != "" { + t.Errorf("Defaulted config doesn't match fuzzed one: %s", diff) + } + } +} diff --git a/pkg/cache/delegating_by_gvk_cache.go b/pkg/cache/delegating_by_gvk_cache.go new file mode 100644 index 0000000000..46bd243c66 --- /dev/null +++ b/pkg/cache/delegating_by_gvk_cache.go @@ -0,0 +1,136 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "context" + "maps" + "slices" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// delegatingByGVKCache delegates to a type-specific cache if present +// and uses the defaultCache otherwise. +type delegatingByGVKCache struct { + scheme *runtime.Scheme + caches map[schema.GroupVersionKind]Cache + defaultCache Cache +} + +func (dbt *delegatingByGVKCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + cache, err := dbt.cacheForObject(obj) + if err != nil { + return err + } + return cache.Get(ctx, key, obj, opts...) +} + +func (dbt *delegatingByGVKCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + cache, err := dbt.cacheForObject(list) + if err != nil { + return err + } + return cache.List(ctx, list, opts...) +} + +func (dbt *delegatingByGVKCache) RemoveInformer(ctx context.Context, obj client.Object) error { + cache, err := dbt.cacheForObject(obj) + if err != nil { + return err + } + return cache.RemoveInformer(ctx, obj) +} + +func (dbt *delegatingByGVKCache) GetInformer(ctx context.Context, obj client.Object, opts ...InformerGetOption) (Informer, error) { + cache, err := dbt.cacheForObject(obj) + if err != nil { + return nil, err + } + return cache.GetInformer(ctx, obj, opts...) +} + +func (dbt *delegatingByGVKCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...InformerGetOption) (Informer, error) { + return dbt.cacheForGVK(gvk).GetInformerForKind(ctx, gvk, opts...) 
+} + +func (dbt *delegatingByGVKCache) Start(ctx context.Context) error { + allCaches := slices.Collect(maps.Values(dbt.caches)) + allCaches = append(allCaches, dbt.defaultCache) + + wg := &sync.WaitGroup{} + errs := make(chan error) + for idx := range allCaches { + cache := allCaches[idx] + wg.Add(1) + go func() { + defer wg.Done() + if err := cache.Start(ctx); err != nil { + errs <- err + } + }() + } + + select { + case err := <-errs: + return err + case <-ctx.Done(): + wg.Wait() + return nil + } +} + +func (dbt *delegatingByGVKCache) WaitForCacheSync(ctx context.Context) bool { + synced := true + for _, cache := range append(slices.Collect(maps.Values(dbt.caches)), dbt.defaultCache) { + if !cache.WaitForCacheSync(ctx) { + synced = false + } + } + + return synced +} + +func (dbt *delegatingByGVKCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + cache, err := dbt.cacheForObject(obj) + if err != nil { + return err + } + return cache.IndexField(ctx, obj, field, extractValue) +} + +func (dbt *delegatingByGVKCache) cacheForObject(o runtime.Object) (Cache, error) { + gvk, err := apiutil.GVKForObject(o, dbt.scheme) + if err != nil { + return nil, err + } + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + return dbt.cacheForGVK(gvk), nil +} + +func (dbt *delegatingByGVKCache) cacheForGVK(gvk schema.GroupVersionKind) Cache { + if specific, hasSpecific := dbt.caches[gvk]; hasSpecific { + return specific + } + + return dbt.defaultCache +} diff --git a/pkg/cache/informer_cache.go b/pkg/cache/informer_cache.go index 6348937466..091667b7fa 100644 --- a/pkg/cache/informer_cache.go +++ b/pkg/cache/informer_cache.go @@ -19,14 +19,15 @@ package cache import ( "context" "fmt" - "reflect" "strings" apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" 
"k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/internal" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -38,110 +39,191 @@ var ( _ Cache = &informerCache{} ) -// informerCache is a Kubernetes Object cache populated from InformersMap. informerCache wraps an InformersMap. +// ErrCacheNotStarted is returned when trying to read from the cache that wasn't started. +type ErrCacheNotStarted struct{} + +func (*ErrCacheNotStarted) Error() string { + return "the cache is not started, can not read objects" +} + +var _ error = (*ErrCacheNotStarted)(nil) + +// ErrResourceNotCached indicates that the resource type +// the client asked the cache for is not cached, i.e. the +// corresponding informer does not exist yet. +type ErrResourceNotCached struct { + GVK schema.GroupVersionKind +} + +// Error returns the error +func (r ErrResourceNotCached) Error() string { + return fmt.Sprintf("%s is not cached", r.GVK.String()) +} + +var _ error = (*ErrResourceNotCached)(nil) + +// informerCache is a Kubernetes Object cache populated from internal.Informers. +// informerCache wraps internal.Informers. type informerCache struct { - *internal.InformersMap + scheme *runtime.Scheme + *internal.Informers + readerFailOnMissingInformer bool } -// Get implements Reader -func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out runtime.Object) error { - gvk, err := apiutil.GVKForObject(out, ip.Scheme) +// Get implements Reader. 
+func (ic *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { + gvk, err := apiutil.GVKForObject(out, ic.scheme) if err != nil { return err } - cache, err := ip.InformersMap.Get(gvk, out) + started, cache, err := ic.getInformerForKind(ctx, gvk, out) if err != nil { return err } - return cache.Reader.Get(ctx, key, out) + + if !started { + return &ErrCacheNotStarted{} + } + return cache.Reader.Get(ctx, key, out, opts...) } -// List implements Reader -func (ip *informerCache) List(ctx context.Context, out runtime.Object, opts ...client.ListOptionFunc) error { - gvk, err := apiutil.GVKForObject(out, ip.Scheme) +// List implements Reader. +func (ic *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { + gvk, cacheTypeObj, err := ic.objectTypeForListObject(out) if err != nil { return err } - if !strings.HasSuffix(gvk.Kind, "List") { - return fmt.Errorf("non-list type %T (kind %q) passed as output", out, gvk) + started, cache, err := ic.getInformerForKind(ctx, *gvk, cacheTypeObj) + if err != nil { + return err + } + + if !started { + return &ErrCacheNotStarted{} + } + + return cache.Reader.List(ctx, out, opts...) +} + +// objectTypeForListObject tries to find the runtime.Object and associated GVK +// for a single object corresponding to the passed-in list type. We need them +// because they are used as cache map key. +func (ic *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) { + gvk, err := apiutil.GVKForObject(list, ic.scheme) + if err != nil { + return nil, nil, err } - // we need the non-list GVK, so chop off the "List" from the end of the kind - gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] - _, isUnstructured := out.(*unstructured.UnstructuredList) - var cacheTypeObj runtime.Object - if isUnstructured { + + // We need the non-list GVK, so chop off the "List" from the end of the kind. 
+ gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + // Handle unstructured.UnstructuredList. + if _, isUnstructured := list.(runtime.Unstructured); isUnstructured { u := &unstructured.Unstructured{} u.SetGroupVersionKind(gvk) - cacheTypeObj = u - } else { - itemsPtr, err := apimeta.GetItemsPtr(out) - if err != nil { - return nil - } - // http://knowyourmeme.com/memes/this-is-fine - elemType := reflect.Indirect(reflect.ValueOf(itemsPtr)).Type().Elem() - cacheTypeValue := reflect.Zero(reflect.PtrTo(elemType)) - var ok bool - cacheTypeObj, ok = cacheTypeValue.Interface().(runtime.Object) - if !ok { - return fmt.Errorf("cannot get cache for %T, its element %T is not a runtime.Object", out, cacheTypeValue.Interface()) - } + return &gvk, u, nil + } + // Handle metav1.PartialObjectMetadataList. + if _, isPartialObjectMetadata := list.(*metav1.PartialObjectMetadataList); isPartialObjectMetadata { + pom := &metav1.PartialObjectMetadata{} + pom.SetGroupVersionKind(gvk) + return &gvk, pom, nil } - cache, err := ip.InformersMap.Get(gvk, cacheTypeObj) + // Any other list type should have a corresponding non-list type registered + // in the scheme. Use that to create a new instance of the non-list type. + cacheTypeObj, err := ic.scheme.New(gvk) if err != nil { - return err + return nil, nil, err } + return &gvk, cacheTypeObj, nil +} - return cache.Reader.List(ctx, out, opts...) +func applyGetOptions(opts ...InformerGetOption) *internal.GetOptions { + cfg := &InformerGetOptions{} + for _, opt := range opts { + opt(cfg) + } + return (*internal.GetOptions)(cfg) } -// GetInformerForKind returns the informer for the GroupVersionKind -func (ip *informerCache) GetInformerForKind(gvk schema.GroupVersionKind) (Informer, error) { +// GetInformerForKind returns the informer for the GroupVersionKind. If no informer exists, one will be started. 
+func (ic *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...InformerGetOption) (Informer, error) { // Map the gvk to an object - obj, err := ip.Scheme.New(gvk) + obj, err := ic.scheme.New(gvk) if err != nil { return nil, err } - i, err := ip.InformersMap.Get(gvk, obj) + + _, i, err := ic.Informers.Get(ctx, gvk, obj, applyGetOptions(opts...)) if err != nil { return nil, err } - return i.Informer, err + return i.Informer, nil } -// GetInformer returns the informer for the obj -func (ip *informerCache) GetInformer(obj runtime.Object) (Informer, error) { - gvk, err := apiutil.GVKForObject(obj, ip.Scheme) +// GetInformer returns the informer for the obj. If no informer exists, one will be started. +func (ic *informerCache) GetInformer(ctx context.Context, obj client.Object, opts ...InformerGetOption) (Informer, error) { + gvk, err := apiutil.GVKForObject(obj, ic.scheme) if err != nil { return nil, err } - i, err := ip.InformersMap.Get(gvk, obj) + + _, i, err := ic.Informers.Get(ctx, gvk, obj, applyGetOptions(opts...)) if err != nil { return nil, err } - return i.Informer, err + return i.Informer, nil +} + +func (ic *informerCache) getInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *internal.Cache, error) { + if ic.readerFailOnMissingInformer { + cache, started, ok := ic.Informers.Peek(gvk, obj) + if !ok { + return false, nil, &ErrResourceNotCached{GVK: gvk} + } + return started, cache, nil + } + + return ic.Informers.Get(ctx, gvk, obj, &internal.GetOptions{}) +} + +// RemoveInformer deactivates and removes the informer from the cache. 
+func (ic *informerCache) RemoveInformer(_ context.Context, obj client.Object) error { + gvk, err := apiutil.GVKForObject(obj, ic.scheme) + if err != nil { + return err + } + + ic.Informers.Remove(gvk, obj) + return nil +} + +// NeedLeaderElection implements the LeaderElectionRunnable interface +// to indicate that this can be started without requiring the leader lock. +func (ic *informerCache) NeedLeaderElection() bool { + return false } -// IndexField adds an indexer to the underlying cache, using extraction function to get -// value(s) from the given field. This index can then be used by passing a field selector +// IndexField adds an indexer to the underlying informer, using extractValue function to get +// value(s) from the given field. This index can then be used by passing a field selector // to List. For one-to-one compatibility with "normal" field selectors, only return one value. -// The values may be anything. They will automatically be prefixed with the namespace of the -// given object, if present. The objects passed are guaranteed to be objects of the correct type. -func (ip *informerCache) IndexField(obj runtime.Object, field string, extractValue client.IndexerFunc) error { - informer, err := ip.GetInformer(obj) +// The values may be anything. They will automatically be prefixed with the namespace of the +// given object, if present. The objects passed are guaranteed to be objects of the correct type. +func (ic *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + informer, err := ic.GetInformer(ctx, obj) if err != nil { return err } return indexByField(informer, field, extractValue) } -func indexByField(indexer Informer, field string, extractor client.IndexerFunc) error { +func indexByField(informer Informer, field string, extractValue client.IndexerFunc) error { indexFunc := func(objRaw interface{}) ([]string, error) { // TODO(directxman12): check if this is the correct type? 
- obj, isObj := objRaw.(runtime.Object) + obj, isObj := objRaw.(client.Object) if !isObj { return nil, fmt.Errorf("object of type %T is not an Object", objRaw) } @@ -151,11 +233,11 @@ func indexByField(indexer Informer, field string, extractor client.IndexerFunc) } ns := meta.GetNamespace() - rawVals := extractor(obj) + rawVals := extractValue(obj) var vals []string if ns == "" { - // if we're not doubling the keys for the namespaced case, just re-use what was returned to us - vals = rawVals + // if we're not doubling the keys for the namespaced case, just create a new slice with same length + vals = make([]string, len(rawVals)) } else { // if we need to add non-namespaced versions too, double the length vals = make([]string, len(rawVals)*2) @@ -174,5 +256,5 @@ func indexByField(indexer Informer, field string, extractor client.IndexerFunc) return vals, nil } - return indexer.AddIndexers(cache.Indexers{internal.FieldIndexName(field): indexFunc}) + return informer.AddIndexers(cache.Indexers{internal.FieldIndexName(field): indexFunc}) } diff --git a/pkg/cache/informer_cache_test.go b/pkg/cache/informer_cache_test.go new file mode 100644 index 0000000000..617e74c4e5 --- /dev/null +++ b/pkg/cache/informer_cache_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +var _ = Describe("informerCache", func() { + It("should not require LeaderElection", func() { + cfg := &rest.Config{} + + httpClient, err := rest.HTTPClientFor(cfg) + Expect(err).ToNot(HaveOccurred()) + mapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient) + Expect(err).ToNot(HaveOccurred()) + + c, err := cache.New(cfg, cache.Options{Mapper: mapper}) + Expect(err).ToNot(HaveOccurred()) + + leaderElectionRunnable, ok := c.(manager.LeaderElectionRunnable) + Expect(ok).To(BeTrue()) + Expect(leaderElectionRunnable.NeedLeaderElection()).To(BeFalse()) + }) +}) diff --git a/pkg/cache/informer_cache_unit_test.go b/pkg/cache/informer_cache_unit_test.go new file mode 100644 index 0000000000..4772223748 --- /dev/null +++ b/pkg/cache/informer_cache_unit_test.go @@ -0,0 +1,109 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + + "sigs.k8s.io/controller-runtime/pkg/cache/internal" + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + crscheme "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +const ( + itemPointerSliceTypeGroupName = "jakob.fabian" + itemPointerSliceTypeVersion = "v1" +) + +var _ = Describe("ip.objectTypeForListObject", func() { + ip := &informerCache{ + scheme: scheme.Scheme, + Informers: &internal.Informers{}, + } + + It("should find the object type for unstructured lists", func() { + unstructuredList := &unstructured.UnstructuredList{} + unstructuredList.SetAPIVersion("v1") + unstructuredList.SetKind("PodList") + + gvk, obj, err := ip.objectTypeForListObject(unstructuredList) + Expect(err).ToNot(HaveOccurred()) + Expect(gvk.Group).To(Equal("")) + Expect(gvk.Version).To(Equal("v1")) + Expect(gvk.Kind).To(Equal("Pod")) + referenceUnstructured := &unstructured.Unstructured{} + referenceUnstructured.SetGroupVersionKind(*gvk) + Expect(obj).To(Equal(referenceUnstructured)) + }) + + It("should find the object type for partial object metadata lists", func() { + partialList := &metav1.PartialObjectMetadataList{} + partialList.APIVersion = ("v1") + partialList.Kind = "PodList" + + gvk, obj, err := ip.objectTypeForListObject(partialList) + Expect(err).ToNot(HaveOccurred()) + Expect(gvk.Group).To(Equal("")) + Expect(gvk.Version).To(Equal("v1")) + Expect(gvk.Kind).To(Equal("Pod")) + referencePartial := &metav1.PartialObjectMetadata{} + referencePartial.SetGroupVersionKind(*gvk) + Expect(obj).To(Equal(referencePartial)) + }) + + It("should find the object type of a list with a slice of literals items field", func() { + gvk, obj, err := ip.objectTypeForListObject(&corev1.PodList{}) + 
Expect(err).ToNot(HaveOccurred()) + Expect(gvk.Group).To(Equal("")) + Expect(gvk.Version).To(Equal("v1")) + Expect(gvk.Kind).To(Equal("Pod")) + referencePod := &corev1.Pod{} + Expect(obj).To(Equal(referencePod)) + }) + + It("should find the object type of a list with a slice of pointers items field", func() { + By("registering the type", func() { + ip.scheme = runtime.NewScheme() + err := (&crscheme.Builder{ + GroupVersion: schema.GroupVersion{Group: itemPointerSliceTypeGroupName, Version: itemPointerSliceTypeVersion}, + }). + Register( + &controllertest.UnconventionalListType{}, + &controllertest.UnconventionalListTypeList{}, + ).AddToScheme(ip.scheme) + Expect(err).ToNot(HaveOccurred()) + }) + + By("calling objectTypeForListObject", func() { + gvk, obj, err := ip.objectTypeForListObject(&controllertest.UnconventionalListTypeList{}) + Expect(err).ToNot(HaveOccurred()) + Expect(gvk.Group).To(Equal(itemPointerSliceTypeGroupName)) + Expect(gvk.Version).To(Equal(itemPointerSliceTypeVersion)) + Expect(gvk.Kind).To(Equal("UnconventionalListType")) + referenceObject := &controllertest.UnconventionalListType{} + Expect(obj).To(Equal(referenceObject)) + }) + }) +}) diff --git a/pkg/cache/informertest/fake_cache.go b/pkg/cache/informertest/fake_cache.go index 93a8fbcf4c..a1a442316f 100644 --- a/pkg/cache/informertest/fake_cache.go +++ b/pkg/cache/informertest/fake_cache.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes/scheme" toolscache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" @@ -30,7 +31,7 @@ import ( var _ cache.Cache = &FakeInformers{} -// FakeInformers is a fake implementation of Informers +// FakeInformers is a fake implementation of Informers. 
type FakeInformers struct { InformersByGVK map[schema.GroupVersionKind]toolscache.SharedIndexInformer Scheme *runtime.Scheme @@ -38,8 +39,8 @@ type FakeInformers struct { Synced *bool } -// GetInformerForKind implements Informers -func (c *FakeInformers) GetInformerForKind(gvk schema.GroupVersionKind) (cache.Informer, error) { +// GetInformerForKind implements Informers. +func (c *FakeInformers) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...cache.InformerGetOption) (cache.Informer, error) { if c.Scheme == nil { c.Scheme = scheme.Scheme } @@ -50,24 +51,17 @@ func (c *FakeInformers) GetInformerForKind(gvk schema.GroupVersionKind) (cache.I return c.informerFor(gvk, obj) } -// FakeInformerForKind implements Informers -func (c *FakeInformers) FakeInformerForKind(gvk schema.GroupVersionKind) (*controllertest.FakeInformer, error) { - if c.Scheme == nil { - c.Scheme = scheme.Scheme - } - obj, err := c.Scheme.New(gvk) - if err != nil { - return nil, err - } - i, err := c.informerFor(gvk, obj) +// FakeInformerForKind implements Informers. +func (c *FakeInformers) FakeInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (*controllertest.FakeInformer, error) { + i, err := c.GetInformerForKind(ctx, gvk) if err != nil { return nil, err } return i.(*controllertest.FakeInformer), nil } -// GetInformer implements Informers -func (c *FakeInformers) GetInformer(obj runtime.Object) (cache.Informer, error) { +// GetInformer implements Informers. 
+func (c *FakeInformers) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) { if c.Scheme == nil { c.Scheme = scheme.Scheme } @@ -79,25 +73,31 @@ func (c *FakeInformers) GetInformer(obj runtime.Object) (cache.Informer, error) return c.informerFor(gvk, obj) } -// WaitForCacheSync implements Informers -func (c *FakeInformers) WaitForCacheSync(stop <-chan struct{}) bool { - if c.Synced == nil { - return true - } - return *c.Synced -} - -// FakeInformerFor implements Informers -func (c *FakeInformers) FakeInformerFor(obj runtime.Object) (*controllertest.FakeInformer, error) { +// RemoveInformer implements Informers. +func (c *FakeInformers) RemoveInformer(ctx context.Context, obj client.Object) error { if c.Scheme == nil { c.Scheme = scheme.Scheme } gvks, _, err := c.Scheme.ObjectKinds(obj) if err != nil { - return nil, err + return err } gvk := gvks[0] - i, err := c.informerFor(gvk, obj) + delete(c.InformersByGVK, gvk) + return nil +} + +// WaitForCacheSync implements Informers. +func (c *FakeInformers) WaitForCacheSync(ctx context.Context) bool { + if c.Synced == nil { + return true + } + return *c.Synced +} + +// FakeInformerFor implements Informers. +func (c *FakeInformers) FakeInformerFor(ctx context.Context, obj client.Object) (*controllertest.FakeInformer, error) { + i, err := c.GetInformer(ctx, obj) if err != nil { return nil, err } @@ -120,22 +120,22 @@ func (c *FakeInformers) informerFor(gvk schema.GroupVersionKind, _ runtime.Objec return c.InformersByGVK[gvk], nil } -// Start implements Informers -func (c *FakeInformers) Start(stopCh <-chan struct{}) error { +// Start implements Informers. +func (c *FakeInformers) Start(ctx context.Context) error { return c.Error } -// IndexField implements Cache -func (c *FakeInformers) IndexField(obj runtime.Object, field string, extractValue client.IndexerFunc) error { +// IndexField implements Cache. 
+func (c *FakeInformers) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { return nil } -// Get implements Cache -func (c *FakeInformers) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error { +// Get implements Cache. +func (c *FakeInformers) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { return nil } -// List implements Cache -func (c *FakeInformers) List(ctx context.Context, list runtime.Object, opts ...client.ListOptionFunc) error { +// List implements Cache. +func (c *FakeInformers) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { return nil } diff --git a/pkg/cache/internal/cache_reader.go b/pkg/cache/internal/cache_reader.go index 1611aa3720..eb6b544855 100644 --- a/pkg/cache/internal/cache_reader.go +++ b/pkg/cache/internal/cache_reader.go @@ -21,32 +21,46 @@ import ( "fmt" "reflect" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/selection" "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" + "sigs.k8s.io/controller-runtime/pkg/internal/field/selector" ) -// CacheReader is a CacheReader +// CacheReader is a client.Reader. var _ client.Reader = &CacheReader{} -// CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type +// CacheReader wraps a cache.Index to implement the client.Reader interface for a single type. type CacheReader struct { // indexer is the underlying indexer wrapped by this cache. indexer cache.Indexer // groupVersionKind is the group-version-kind of the resource. 
groupVersionKind schema.GroupVersionKind + + // scopeName is the scope of the resource (namespaced or cluster-scoped). + scopeName apimeta.RESTScopeName + + // disableDeepCopy indicates not to deep copy objects during get or list objects. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + disableDeepCopy bool } -// Get checks the indexer for the object and writes a copy of it if found -func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out runtime.Object) error { +// Get checks the indexer for the object and writes a copy of it if found. +func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { + getOpts := client.GetOptions{} + getOpts.ApplyOptions(opts) + + if c.scopeName == apimeta.RESTScopeNameRoot { + key.Namespace = "" + } storeKey := objectKeyToStoreKey(key) // Lookup the object from the indexer cache @@ -57,9 +71,9 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out runtime.O // Not found, return an error if !exists { - // Resource gets transformed into Kind in the error anyway, so this is fine - return errors.NewNotFound(schema.GroupResource{ - Group: c.groupVersionKind.Group, + return apierrors.NewNotFound(schema.GroupResource{ + Group: c.groupVersionKind.Group, + // Resource gets set as Kind in the error so this is fine Resource: c.groupVersionKind.Kind, }, key.Name) } @@ -70,9 +84,13 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out runtime.O return fmt.Errorf("cache contained %T, which is not an Object", obj) } - // deep copy to avoid mutating cache - // TODO(directxman12): revisit the decision to always deepcopy - obj = obj.(runtime.Object).DeepCopyObject() + if c.disableDeepCopy || (getOpts.UnsafeDisableDeepCopy != nil && *getOpts.UnsafeDisableDeepCopy) { + // skip deep copy which might be unsafe + // you must DeepCopy any 
object before mutating it outside + } else { + // deep copy to avoid mutating cache + obj = obj.(runtime.Object).DeepCopyObject() + } // Copy the value of the item in the cache to the returned value // TODO(directxman12): this is a terrible hack, pls fix (we should have deepcopyinto) @@ -82,33 +100,38 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out runtime.O return fmt.Errorf("cache had type %s, but %s was asked for", objVal.Type(), outVal.Type()) } reflect.Indirect(outVal).Set(reflect.Indirect(objVal)) - out.GetObjectKind().SetGroupVersionKind(c.groupVersionKind) + if !c.disableDeepCopy && (getOpts.UnsafeDisableDeepCopy == nil || !*getOpts.UnsafeDisableDeepCopy) { + out.GetObjectKind().SetGroupVersionKind(c.groupVersionKind) + } return nil } -// List lists items out of the indexer and writes them to out -func (c *CacheReader) List(_ context.Context, out runtime.Object, opts ...client.ListOptionFunc) error { +// List lists items out of the indexer and writes them to out. +func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...client.ListOption) error { var objs []interface{} var err error listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) - if listOpts.FieldSelector != nil { - // TODO(directxman12): support more complicated field selectors by - // combining multiple indicies, GetIndexers, etc - field, val, requiresExact := requiresExactMatch(listOpts.FieldSelector) + if listOpts.Continue != "" { + return fmt.Errorf("continue list option is not supported by the cache") + } + + switch { + case listOpts.FieldSelector != nil: + requiresExact := selector.RequiresExactMatch(listOpts.FieldSelector) if !requiresExact { return fmt.Errorf("non-exact field matches are not supported by the cache") } - // list all objects by the field selector. If this is namespaced and we have one, ask for the - // namespaced index key. 
Otherwise, ask for the non-namespaced variant by using the fake "all namespaces" + // list all objects by the field selector. If this is namespaced and we have one, ask for the + // namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces" // namespace. - objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val)) - } else if listOpts.Namespace != "" { + objs, err = byIndexes(c.indexer, listOpts.FieldSelector.Requirements(), listOpts.Namespace) + case listOpts.Namespace != "": objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace) - } else { + default: objs = c.indexer.List() } if err != nil { @@ -119,25 +142,100 @@ func (c *CacheReader) List(_ context.Context, out runtime.Object, opts ...client labelSel = listOpts.LabelSelector } + limitSet := listOpts.Limit > 0 + runtimeObjs := make([]runtime.Object, 0, len(objs)) for _, item := range objs { + // if the Limit option is set and the number of items + // listed exceeds this limit, then stop reading. 
+ if limitSet && int64(len(runtimeObjs)) >= listOpts.Limit { + break + } obj, isObj := item.(runtime.Object) if !isObj { - return fmt.Errorf("cache contained %T, which is not an Object", obj) + return fmt.Errorf("cache contained %T, which is not an Object", item) + } + meta, err := apimeta.Accessor(obj) + if err != nil { + return err + } + if labelSel != nil { + lbls := labels.Set(meta.GetLabels()) + if !labelSel.Matches(lbls) { + continue + } + } + + var outObj runtime.Object + if c.disableDeepCopy || (listOpts.UnsafeDisableDeepCopy != nil && *listOpts.UnsafeDisableDeepCopy) { + // skip deep copy which might be unsafe + // you must DeepCopy any object before mutating it outside + outObj = obj + } else { + outObj = obj.DeepCopyObject() + outObj.GetObjectKind().SetGroupVersionKind(c.groupVersionKind) } - outObj := obj.DeepCopyObject() - outObj.GetObjectKind().SetGroupVersionKind(c.groupVersionKind) runtimeObjs = append(runtimeObjs, outObj) } - filteredItems, err := objectutil.FilterWithLabels(runtimeObjs, labelSel) - if err != nil { + + if err := apimeta.SetList(out, runtimeObjs); err != nil { return err } - return apimeta.SetList(out, filteredItems) + + out.SetContinue("continue-not-supported") + return nil +} + +func byIndexes(indexer cache.Indexer, requires fields.Requirements, namespace string) ([]interface{}, error) { + var ( + err error + objs []interface{} + vals []string + ) + indexers := indexer.GetIndexers() + for idx, req := range requires { + indexName := FieldIndexName(req.Field) + indexedValue := KeyToNamespacedKey(namespace, req.Value) + if idx == 0 { + // we use first require to get snapshot data + // TODO(halfcrazy): use complicated index when client-go provides byIndexes + // https://github.com/kubernetes/kubernetes/issues/109329 + objs, err = indexer.ByIndex(indexName, indexedValue) + if err != nil { + return nil, err + } + if len(objs) == 0 { + return nil, nil + } + continue + } + fn, exist := indexers[indexName] + if !exist { + 
return nil, fmt.Errorf("index with name %s does not exist", indexName) + } + filteredObjects := make([]interface{}, 0, len(objs)) + for _, obj := range objs { + vals, err = fn(obj) + if err != nil { + return nil, err + } + for _, val := range vals { + if val == indexedValue { + filteredObjects = append(filteredObjects, obj) + break + } + } + } + if len(filteredObjects) == 0 { + return nil, nil + } + objs = filteredObjects + } + return objs, nil } // objectKeyToStorageKey converts an object key to store key. -// It's akin to MetaNamespaceKeyFunc. It's separate from +// It's akin to MetaNamespaceKeyFunc. It's separate from // String to allow keeping the key format easily in sync with // MetaNamespaceKeyFunc. func objectKeyToStoreKey(k client.ObjectKey) string { @@ -147,26 +245,13 @@ func objectKeyToStoreKey(k client.ObjectKey) string { return k.Namespace + "/" + k.Name } -// requiresExactMatch checks if the given field selector is of the form `k=v` or `k==v`. -func requiresExactMatch(sel fields.Selector) (field, val string, required bool) { - reqs := sel.Requirements() - if len(reqs) != 1 { - return "", "", false - } - req := reqs[0] - if req.Operator != selection.Equals && req.Operator != selection.DoubleEquals { - return "", "", false - } - return req.Field, req.Value, true -} - // FieldIndexName constructs the name of the index over the given field, // for use with an indexer. func FieldIndexName(field string) string { return "field:" + field } -// noNamespaceNamespace is used as the "namespace" when we want to list across all namespaces +// allNamespacesNamespace is used as the "namespace" when we want to list across all namespaces. 
const allNamespacesNamespace = "__all_namespaces" // KeyToNamespacedKey prefixes the given index key with a namespace diff --git a/pkg/cache/internal/deleg_map.go b/pkg/cache/internal/deleg_map.go deleted file mode 100644 index 978002d9cf..0000000000 --- a/pkg/cache/internal/deleg_map.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "time" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" -) - -// InformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. -// It uses a standard parameter codec constructed based on the given generated Scheme. -type InformersMap struct { - // we abstract over the details of structured vs unstructured with the specificInformerMaps - - structured *specificInformersMap - unstructured *specificInformersMap - - // Scheme maps runtime.Objects to GroupVersionKinds - Scheme *runtime.Scheme -} - -// NewInformersMap creates a new InformersMap that can create informers for -// both structured and unstructured objects. 
-func NewInformersMap(config *rest.Config, - scheme *runtime.Scheme, - mapper meta.RESTMapper, - resync time.Duration, - namespace string) *InformersMap { - - return &InformersMap{ - structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace), - unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace), - - Scheme: scheme, - } -} - -// Start calls Run on each of the informers and sets started to true. Blocks on the stop channel. -func (m *InformersMap) Start(stop <-chan struct{}) error { - go m.structured.Start(stop) - go m.unstructured.Start(stop) - <-stop - return nil -} - -// WaitForCacheSync waits until all the caches have been synced. -func (m *InformersMap) WaitForCacheSync(stop <-chan struct{}) bool { - syncedFuncs := append([]cache.InformerSynced(nil), m.structured.HasSyncedFuncs()...) - syncedFuncs = append(syncedFuncs, m.unstructured.HasSyncedFuncs()...) - - return cache.WaitForCacheSync(stop, syncedFuncs...) -} - -// Get will create a new Informer and add it to the map of InformersMap if none exists. Returns -// the Informer from the map. -func (m *InformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, error) { - _, isUnstructured := obj.(*unstructured.Unstructured) - _, isUnstructuredList := obj.(*unstructured.UnstructuredList) - isUnstructured = isUnstructured || isUnstructuredList - - if isUnstructured { - return m.unstructured.Get(gvk, obj) - } - - return m.structured.Get(gvk, obj) -} - -// newStructuredInformersMap creates a new InformersMap for structured objects. -func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createStructuredListWatch) -} - -// newUnstructuredInformersMap creates a new InformersMap for unstructured objects. 
-func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createUnstructuredListWatch) -} diff --git a/pkg/cache/internal/informers.go b/pkg/cache/internal/informers.go new file mode 100644 index 0000000000..f216be0d9e --- /dev/null +++ b/pkg/cache/internal/informers.go @@ -0,0 +1,616 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "errors" + "fmt" + "math/rand" + "net/http" + "sync" + "time" + + "github.com/go-logr/logr" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/metadata" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/internal/syncs" +) + +var log = logf.RuntimeLog.WithName("cache") + +// InformersOpts configures an InformerMap. 
type InformersOpts struct {
	// HTTPClient is used when constructing the REST, dynamic, and metadata clients.
	HTTPClient *http.Client
	// Scheme maps runtime.Objects to GroupVersionKinds.
	Scheme *runtime.Scheme
	// Mapper maps GroupVersionKinds to Resources.
	Mapper meta.RESTMapper
	// ResyncPeriod is the base resync period for every informer created here.
	ResyncPeriod time.Duration
	// Namespace restricts all ListWatches to one namespace; empty means all namespaces.
	Namespace string
	// NewInformer overrides the shared index informer constructor (used for testing);
	// when nil, cache.NewSharedIndexInformer is used.
	NewInformer func(cache.ListerWatcher, runtime.Object, time.Duration, cache.Indexers) cache.SharedIndexInformer
	// Selector is applied to every list/watch issued by the informers.
	Selector Selector
	// Transform is installed on each informer via SetTransform.
	Transform cache.TransformFunc
	// UnsafeDisableDeepCopy disables deep copies on reads served from the cache.
	UnsafeDisableDeepCopy bool
	// EnableWatchBookmarks sets AllowWatchBookmarks on every watch request.
	EnableWatchBookmarks bool
	// WatchErrorHandler, when non-nil, is installed on each informer.
	WatchErrorHandler cache.WatchErrorHandlerWithContext
}

// NewInformers creates a new InformersMap that can create informers under the hood.
func NewInformers(config *rest.Config, options *InformersOpts) *Informers {
	// Fall back to the default shared index informer constructor unless the
	// caller supplied an override.
	newInformer := cache.NewSharedIndexInformer
	if options.NewInformer != nil {
		newInformer = options.NewInformer
	}
	return &Informers{
		config:     config,
		httpClient: options.HTTPClient,
		scheme:     options.Scheme,
		mapper:     options.Mapper,
		tracker: tracker{
			Structured:   make(map[schema.GroupVersionKind]*Cache),
			Unstructured: make(map[schema.GroupVersionKind]*Cache),
			Metadata:     make(map[schema.GroupVersionKind]*Cache),
		},
		codecs:                serializer.NewCodecFactory(options.Scheme),
		paramCodec:            runtime.NewParameterCodec(options.Scheme),
		resync:                options.ResyncPeriod,
		startWait:             make(chan struct{}),
		namespace:             options.Namespace,
		selector:              options.Selector,
		transform:             options.Transform,
		unsafeDisableDeepCopy: options.UnsafeDisableDeepCopy,
		enableWatchBookmarks:  options.EnableWatchBookmarks,
		newInformer:           newInformer,
		watchErrorHandler:     options.WatchErrorHandler,
	}
}

// Cache pairs a single informer with the reader that serves Get/List from it,
// for one GroupVersionKind.
type Cache struct {
	// Informer is the cached informer.
	Informer cache.SharedIndexInformer

	// Reader wraps Informer and implements the CacheReader interface for a single type.
	Reader CacheReader

	// stop can be used to stop this individual informer without stopping the
	// whole map (closed by Informers.Remove).
	stop chan struct{}
}

// Start starts the informer managed by a MapEntry.
// Blocks until the informer stops.
The informer can be stopped +// either individually (via the entry's stop channel) or globally +// via the provided stop argument. +func (c *Cache) Start(stop <-chan struct{}) { + // Stop on either the whole map stopping or just this informer being removed. + internalStop, cancel := syncs.MergeChans(stop, c.stop) + defer cancel() + // Convert the stop channel to a context and then add the logger. + c.Informer.RunWithContext(logr.NewContext(wait.ContextForChannel(internalStop), log)) +} + +type tracker struct { + Structured map[schema.GroupVersionKind]*Cache + Unstructured map[schema.GroupVersionKind]*Cache + Metadata map[schema.GroupVersionKind]*Cache +} + +// GetOptions provides configuration to customize the behavior when +// getting an informer. +type GetOptions struct { + // BlockUntilSynced controls if the informer retrieval will block until the informer is synced. Defaults to `true`. + BlockUntilSynced *bool +} + +// Informers create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. +// It uses a standard parameter codec constructed based on the given generated Scheme. +type Informers struct { + // httpClient is used to create a new REST client + httpClient *http.Client + + // scheme maps runtime.Objects to GroupVersionKinds + scheme *runtime.Scheme + + // config is used to talk to the apiserver + config *rest.Config + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // tracker tracks informers keyed by their type and groupVersionKind + tracker tracker + + // codecs is used to create a new REST client + codecs serializer.CodecFactory + + // paramCodec is used by list and watch + paramCodec runtime.ParameterCodec + + // resync is the base frequency the informers are resynced + // a 10 percent jitter will be added to the resync period between informers + // so that all informers will not send list requests simultaneously. 
+ resync time.Duration + + // mu guards access to the map + mu sync.RWMutex + + // started is true if the informers have been started + started bool + + // startWait is a channel that is closed after the + // informer has been started. + startWait chan struct{} + + // waitGroup is the wait group that is used to wait for all informers to stop + waitGroup sync.WaitGroup + + // stopped is true if the informers have been stopped + stopped bool + + // ctx is the context to stop informers + ctx context.Context + + // namespace is the namespace that all ListWatches are restricted to + // default or empty string means all namespaces + namespace string + + selector Selector + transform cache.TransformFunc + unsafeDisableDeepCopy bool + enableWatchBookmarks bool + + // NewInformer allows overriding of the shared index informer constructor for testing. + newInformer func(cache.ListerWatcher, runtime.Object, time.Duration, cache.Indexers) cache.SharedIndexInformer + + // watchErrorHandler allows the shared index informer's + // watchErrorHandler to be set by overriding the options + // or to use the default watchErrorHandler + watchErrorHandler cache.WatchErrorHandlerWithContext +} + +// Start calls Run on each of the informers and sets started to true. Blocks on the context. +// It doesn't return start because it can't return an error, and it's not a runnable directly. 
+func (ip *Informers) Start(ctx context.Context) error { + if err := func() error { + ip.mu.Lock() + defer ip.mu.Unlock() + + if ip.started { + return errors.New("informer already started") //nolint:stylecheck + } + + // Set the context so it can be passed to informers that are added later + ip.ctx = ctx + + // Start each informer + for _, i := range ip.tracker.Structured { + ip.startInformerLocked(i) + } + for _, i := range ip.tracker.Unstructured { + ip.startInformerLocked(i) + } + for _, i := range ip.tracker.Metadata { + ip.startInformerLocked(i) + } + + // Set started to true so we immediately start any informers added later. + ip.started = true + close(ip.startWait) + + return nil + }(); err != nil { + return err + } + <-ctx.Done() // Block until the context is done + ip.mu.Lock() + ip.stopped = true // Set stopped to true so we don't start any new informers + ip.mu.Unlock() + ip.waitGroup.Wait() // Block until all informers have stopped + return nil +} + +func (ip *Informers) startInformerLocked(cacheEntry *Cache) { + // Don't start the informer in case we are already waiting for the items in + // the waitGroup to finish, since waitGroups don't support waiting and adding + // at the same time. + if ip.stopped { + return + } + + ip.waitGroup.Add(1) + go func() { + defer ip.waitGroup.Done() + cacheEntry.Start(ip.ctx.Done()) + }() +} + +func (ip *Informers) waitForStarted(ctx context.Context) bool { + select { + case <-ip.startWait: + return true + case <-ctx.Done(): + return false + } +} + +// getHasSyncedFuncs returns all the HasSynced functions for the informers in this map. 
+func (ip *Informers) getHasSyncedFuncs() []cache.InformerSynced { + ip.mu.RLock() + defer ip.mu.RUnlock() + + res := make([]cache.InformerSynced, 0, + len(ip.tracker.Structured)+len(ip.tracker.Unstructured)+len(ip.tracker.Metadata), + ) + for _, i := range ip.tracker.Structured { + res = append(res, i.Informer.HasSynced) + } + for _, i := range ip.tracker.Unstructured { + res = append(res, i.Informer.HasSynced) + } + for _, i := range ip.tracker.Metadata { + res = append(res, i.Informer.HasSynced) + } + return res +} + +// WaitForCacheSync waits until all the caches have been started and synced. +func (ip *Informers) WaitForCacheSync(ctx context.Context) bool { + if !ip.waitForStarted(ctx) { + return false + } + return cache.WaitForCacheSync(ctx.Done(), ip.getHasSyncedFuncs()...) +} + +// Peek attempts to get the informer for the GVK, but does not start one if one does not exist. +func (ip *Informers) Peek(gvk schema.GroupVersionKind, obj runtime.Object) (res *Cache, started bool, ok bool) { + ip.mu.RLock() + defer ip.mu.RUnlock() + i, ok := ip.informersByType(obj)[gvk] + return i, ip.started, ok +} + +// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns +// the Informer from the map. +func (ip *Informers) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object, opts *GetOptions) (bool, *Cache, error) { + // Return the informer if it is found + i, started, ok := ip.Peek(gvk, obj) + if !ok { + var err error + if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { + return started, nil, err + } + } + + shouldBlock := true + if opts.BlockUntilSynced != nil { + shouldBlock = *opts.BlockUntilSynced + } + + if shouldBlock && started && !i.Informer.HasSynced() { + // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. 
+ if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) { + return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0) + } + } + + return started, i, nil +} + +// Remove removes an informer entry and stops it if it was running. +func (ip *Informers) Remove(gvk schema.GroupVersionKind, obj runtime.Object) { + ip.mu.Lock() + defer ip.mu.Unlock() + + informerMap := ip.informersByType(obj) + + entry, ok := informerMap[gvk] + if !ok { + return + } + close(entry.stop) + delete(informerMap, gvk) +} + +func (ip *Informers) informersByType(obj runtime.Object) map[schema.GroupVersionKind]*Cache { + switch obj.(type) { + case runtime.Unstructured: + return ip.tracker.Unstructured + case *metav1.PartialObjectMetadata, *metav1.PartialObjectMetadataList: + return ip.tracker.Metadata + default: + return ip.tracker.Structured + } +} + +// addInformerToMap either returns an existing informer or creates a new informer, adds it to the map and returns it. +func (ip *Informers) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*Cache, bool, error) { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Check the cache to see if we already have an Informer. If we do, return the Informer. + // This is for the case where 2 routines tried to get the informer when it wasn't in the map + // so neither returned early, but the first one created it. + if i, ok := ip.informersByType(obj)[gvk]; ok { + return i, ip.started, nil + } + + // Create a NewSharedIndexInformer and add it to the map. 
+ listWatcher, err := ip.makeListWatcher(gvk, obj) + if err != nil { + return nil, false, err + } + sharedIndexInformer := ip.newInformer(&cache.ListWatch{ + ListWithContextFunc: func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { + ip.selector.ApplyToList(&opts) + return listWatcher.ListWithContextFunc(ctx, opts) + }, + WatchFuncWithContext: func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + opts.Watch = true // Watch needs to be set to true separately + opts.AllowWatchBookmarks = ip.enableWatchBookmarks + + ip.selector.ApplyToList(&opts) + return listWatcher.WatchFuncWithContext(ctx, opts) + }, + }, obj, calculateResyncPeriod(ip.resync), cache.Indexers{ + cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, + }) + + // Set WatchErrorHandler on SharedIndexInformer if set + if ip.watchErrorHandler != nil { + if err := sharedIndexInformer.SetWatchErrorHandlerWithContext(ip.watchErrorHandler); err != nil { + return nil, false, err + } + } + + // Check to see if there is a transformer for this gvk + if err := sharedIndexInformer.SetTransform(ip.transform); err != nil { + return nil, false, err + } + + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, false, err + } + + // Create the new entry and set it in the map. + i := &Cache{ + Informer: sharedIndexInformer, + Reader: CacheReader{ + indexer: sharedIndexInformer.GetIndexer(), + groupVersionKind: gvk, + scopeName: mapping.Scope.Name(), + disableDeepCopy: ip.unsafeDisableDeepCopy, + }, + stop: make(chan struct{}), + } + ip.informersByType(obj)[gvk] = i + + // Start the informer in case the InformersMap has started, otherwise it will be + // started when the InformersMap starts. 
+ if ip.started { + ip.startInformerLocked(i) + } + return i, ip.started, nil +} + +func (ip *Informers) makeListWatcher(gvk schema.GroupVersionKind, obj runtime.Object) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + // Figure out if the GVK we're dealing with is global, or namespace scoped. + var namespace string + if mapping.Scope.Name() == meta.RESTScopeNameNamespace { + namespace = restrictNamespaceBySelector(ip.namespace, ip.selector) + } + + switch obj.(type) { + // + // Unstructured + // + case runtime.Unstructured: + // If the rest configuration has a negotiated serializer passed in, + // we should remove it and use the one that the dynamic client sets for us. + cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + dynamicClient, err := dynamic.NewForConfigAndClient(cfg, ip.httpClient) + if err != nil { + return nil, err + } + resources := dynamicClient.Resource(mapping.Resource) + return &cache.ListWatch{ + ListWithContextFunc: func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { + if namespace != "" { + return resources.Namespace(namespace).List(ctx, opts) + } + return resources.List(ctx, opts) + }, + // Setup the watch function + WatchFuncWithContext: func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + opts.Watch = true // Watch needs to be set to true separately + opts.AllowWatchBookmarks = ip.enableWatchBookmarks + + if namespace != "" { + return resources.Namespace(namespace).Watch(ctx, opts) + } + return resources.Watch(ctx, opts) + }, + }, nil + // + // Metadata + // + case *metav1.PartialObjectMetadata, *metav1.PartialObjectMetadataList: + // Always clear the negotiated serializer and use the one + // set from the metadata client. 
+ cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + + // Grab the metadata metadataClient. + metadataClient, err := metadata.NewForConfigAndClient(cfg, ip.httpClient) + if err != nil { + return nil, err + } + resources := metadataClient.Resource(mapping.Resource) + + return &cache.ListWatch{ + ListWithContextFunc: func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { + var ( + list *metav1.PartialObjectMetadataList + err error + ) + if namespace != "" { + list, err = resources.Namespace(namespace).List(ctx, opts) + } else { + list, err = resources.List(ctx, opts) + } + if list != nil { + for i := range list.Items { + list.Items[i].SetGroupVersionKind(gvk) + } + } + return list, err + }, + // Setup the watch function + WatchFuncWithContext: func(ctx context.Context, opts metav1.ListOptions) (watcher watch.Interface, err error) { + opts.Watch = true // Watch needs to be set to true separately + opts.AllowWatchBookmarks = ip.enableWatchBookmarks + + if namespace != "" { + watcher, err = resources.Namespace(namespace).Watch(ctx, opts) + } else { + watcher, err = resources.Watch(ctx, opts) + } + if err != nil { + return nil, err + } + return newGVKFixupWatcher(gvk, watcher), nil + }, + }, nil + // + // Structured. + // + default: + client, err := apiutil.RESTClientForGVK(gvk, false, false, ip.config, ip.codecs, ip.httpClient) + if err != nil { + return nil, err + } + listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List") + listObj, err := ip.scheme.New(listGVK) + if err != nil { + return nil, err + } + return &cache.ListWatch{ + ListWithContextFunc: func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { + // Build the request. + req := client.Get().Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec) + if namespace != "" { + req.Namespace(namespace) + } + + // Create the resulting object, and execute the request. 
+ res := listObj.DeepCopyObject() + if err := req.Do(ctx).Into(res); err != nil { + return nil, err + } + return res, nil + }, + // Setup the watch function + WatchFuncWithContext: func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + opts.Watch = true // Watch needs to be set to true separately + opts.AllowWatchBookmarks = ip.enableWatchBookmarks + + // Build the request. + req := client.Get().Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec) + if namespace != "" { + req.Namespace(namespace) + } + // Call the watch. + return req.Watch(ctx) + }, + }, nil + } +} + +// newGVKFixupWatcher adds a wrapper that preserves the GVK information when +// events come in. +// +// This works around a bug where GVK information is not passed into mapping +// functions when using the OnlyMetadata option in the builder. +// This issue is most likely caused by kubernetes/kubernetes#80609. +// See kubernetes-sigs/controller-runtime#1484. +// +// This was originally implemented as a cache.ResourceEventHandler wrapper but +// that contained a data race which was resolved by setting the GVK in a watch +// wrapper, before the objects are written to the cache. +// See kubernetes-sigs/controller-runtime#1650. +// +// The original watch wrapper was found to be incompatible with +// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a +// watch.Filter which is compatible. +// See kubernetes-sigs/controller-runtime#1789. +func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface { + return watch.Filter( + watcher, + func(in watch.Event) (watch.Event, bool) { + in.Object.GetObjectKind().SetGroupVersionKind(gvk) + return in, true + }, + ) +} + +// calculateResyncPeriod returns a duration based on the desired input +// this is so that multiple controllers don't get into lock-step and all +// hammer the apiserver with list requests simultaneously. 
+func calculateResyncPeriod(resync time.Duration) time.Duration { + // the factor will fall into [0.9, 1.1) + factor := rand.Float64()/5.0 + 0.9 + return time.Duration(float64(resync.Nanoseconds()) * factor) +} + +// restrictNamespaceBySelector returns either a global restriction for all ListWatches +// if not default/empty, or the namespace that a ListWatch for the specific resource +// is restricted to, based on a specified field selector for metadata.namespace field. +func restrictNamespaceBySelector(namespaceOpt string, s Selector) string { + if namespaceOpt != "" { + // namespace is already restricted + return namespaceOpt + } + fieldSelector := s.Field + if fieldSelector == nil || fieldSelector.Empty() { + return "" + } + // check whether a selector includes the namespace field + value, found := fieldSelector.RequiresExactMatch("metadata.namespace") + if found { + return value + } + return "" +} diff --git a/pkg/cache/internal/informers_map.go b/pkg/cache/internal/informers_map.go deleted file mode 100644 index b2787adfc8..0000000000 --- a/pkg/cache/internal/informers_map.go +++ /dev/null @@ -1,276 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internal - -import ( - "fmt" - "sync" - "time" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// clientListWatcherFunc knows how to create a ListWatcher -type createListWatcherFunc func(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) - -// newSpecificInformersMap returns a new specificInformersMap (like -// the generical InformersMap, except that it doesn't implement WaitForCacheSync). -func newSpecificInformersMap(config *rest.Config, - scheme *runtime.Scheme, - mapper meta.RESTMapper, - resync time.Duration, - namespace string, - createListWatcher createListWatcherFunc) *specificInformersMap { - ip := &specificInformersMap{ - config: config, - Scheme: scheme, - mapper: mapper, - informersByGVK: make(map[schema.GroupVersionKind]*MapEntry), - codecs: serializer.NewCodecFactory(scheme), - paramCodec: runtime.NewParameterCodec(scheme), - resync: resync, - createListWatcher: createListWatcher, - namespace: namespace, - } - return ip -} - -// MapEntry contains the cached data for an Informer -type MapEntry struct { - // Informer is the cached informer - Informer cache.SharedIndexInformer - - // CacheReader wraps Informer and implements the CacheReader interface for a single type - Reader CacheReader -} - -// specificInformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. -// It uses a standard parameter codec constructed based on the given generated Scheme. 
-type specificInformersMap struct { - // Scheme maps runtime.Objects to GroupVersionKinds - Scheme *runtime.Scheme - - // config is used to talk to the apiserver - config *rest.Config - - // mapper maps GroupVersionKinds to Resources - mapper meta.RESTMapper - - // informersByGVK is the cache of informers keyed by groupVersionKind - informersByGVK map[schema.GroupVersionKind]*MapEntry - - // codecs is used to create a new REST client - codecs serializer.CodecFactory - - // paramCodec is used by list and watch - paramCodec runtime.ParameterCodec - - // stop is the stop channel to stop informers - stop <-chan struct{} - - // resync is the frequency the informers are resynced - resync time.Duration - - // mu guards access to the map - mu sync.RWMutex - - // start is true if the informers have been started - started bool - - // createClient knows how to create a client and a list object, - // and allows for abstracting over the particulars of structured vs - // unstructured objects. - createListWatcher createListWatcherFunc - - // namespace is the namespace that all ListWatches are restricted to - // default or empty string means all namespaces - namespace string -} - -// Start calls Run on each of the informers and sets started to true. Blocks on the stop channel. -// It doesn't return start because it can't return an error, and it's not a runnable directly. -func (ip *specificInformersMap) Start(stop <-chan struct{}) { - func() { - ip.mu.Lock() - defer ip.mu.Unlock() - - // Set the stop channel so it can be passed to informers that are added later - ip.stop = stop - - // Start each informer - for _, informer := range ip.informersByGVK { - go informer.Informer.Run(stop) - } - - // Set started to true so we immediately start any informers added later. - ip.started = true - }() - <-stop -} - -// HasSyncedFuncs returns all the HasSynced functions for the informers in this map. 
-func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced { - ip.mu.RLock() - defer ip.mu.RUnlock() - syncedFuncs := make([]cache.InformerSynced, 0, len(ip.informersByGVK)) - for _, informer := range ip.informersByGVK { - syncedFuncs = append(syncedFuncs, informer.Informer.HasSynced) - } - return syncedFuncs -} - -// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns -// the Informer from the map. -func (ip *specificInformersMap) Get(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, error) { - // Return the informer if it is found - i, started, ok := func() (*MapEntry, bool, bool) { - ip.mu.RLock() - defer ip.mu.RUnlock() - i, ok := ip.informersByGVK[gvk] - return i, ip.started, ok - }() - - if !ok { - var err error - if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { - return nil, err - } - } - - if started && !i.Informer.HasSynced() { - // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. - if !cache.WaitForCacheSync(ip.stop, i.Informer.HasSynced) { - return nil, fmt.Errorf("failed waiting for %T Informer to sync", obj) - } - } - - return i, nil -} - -func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) { - ip.mu.Lock() - defer ip.mu.Unlock() - - // Check the cache to see if we already have an Informer. If we do, return the Informer. - // This is for the case where 2 routines tried to get the informer when it wasn't in the map - // so neither returned early, but the first one created it. - if i, ok := ip.informersByGVK[gvk]; ok { - return i, ip.started, nil - } - - // Create a NewSharedIndexInformer and add it to the map. 
- var lw *cache.ListWatch - lw, err := ip.createListWatcher(gvk, ip) - if err != nil { - return nil, false, err - } - ni := cache.NewSharedIndexInformer(lw, obj, ip.resync, cache.Indexers{ - cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, - }) - i := &MapEntry{ - Informer: ni, - Reader: CacheReader{indexer: ni.GetIndexer(), groupVersionKind: gvk}, - } - ip.informersByGVK[gvk] = i - - // Start the Informer if need by - // TODO(seans): write thorough tests and document what happens here - can you add indexers? - // can you add eventhandlers? - if ip.started { - go i.Informer.Run(ip.stop) - } - return i, ip.started, nil -} - -// newListWatch returns a new ListWatch object that can be used to create a SharedIndexInformer. -func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { - // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the - // groupVersionKind to the Resource API we will use. - mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - - client, err := apiutil.RESTClientForGVK(gvk, ip.config, ip.codecs) - if err != nil { - return nil, err - } - listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List") - listObj, err := ip.Scheme.New(listGVK) - if err != nil { - return nil, err - } - - // Create a new ListWatch for the obj - return &cache.ListWatch{ - ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - res := listObj.DeepCopyObject() - isNamespaceScoped := ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot - err := client.Get().NamespaceIfScoped(ip.namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do().Into(res) - return res, err - }, - // Setup the watch function - WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - // Watch needs to be set to true separately - opts.Watch = true - isNamespaceScoped := ip.namespace != "" && 
mapping.Scope.Name() != meta.RESTScopeNameRoot - return client.Get().NamespaceIfScoped(ip.namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch() - }, - }, nil -} - -func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { - // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the - // groupVersionKind to the Resource API we will use. - mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - dynamicClient, err := dynamic.NewForConfig(ip.config) - if err != nil { - return nil, err - } - - // Create a new ListWatch for the obj - return &cache.ListWatch{ - ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return dynamicClient.Resource(mapping.Resource).Namespace(ip.namespace).List(opts) - } - return dynamicClient.Resource(mapping.Resource).List(opts) - }, - // Setup the watch function - WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - // Watch needs to be set to true separately - opts.Watch = true - if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return dynamicClient.Resource(mapping.Resource).Namespace(ip.namespace).Watch(opts) - } - return dynamicClient.Resource(mapping.Resource).Watch(opts) - }, - }, nil -} diff --git a/pkg/cache/internal/informers_test.go b/pkg/cache/internal/informers_test.go new file mode 100644 index 0000000000..854a39c1f1 --- /dev/null +++ b/pkg/cache/internal/informers_test.go @@ -0,0 +1,94 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" +) + +// Test that gvkFixupWatcher behaves like watch.FakeWatcher +// and that it overrides the GVK. +// These tests are adapted from the watch.FakeWatcher tests in: +// https://github.com/kubernetes/kubernetes/blob/adbda068c1808fcc8a64a94269e0766b5c46ec41/staging/src/k8s.io/apimachinery/pkg/watch/watch_test.go#L33-L78 +var _ = Describe("gvkFixupWatcher", func() { + It("behaves like watch.FakeWatcher", func() { + newTestType := func(name string) runtime.Object { + return &metav1.PartialObjectMetadata{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + } + + f := watch.NewFake() + // This is the GVK which we expect the wrapper to set on all the events + expectedGVK := schema.GroupVersionKind{ + Group: "testgroup", + Version: "v1test2", + Kind: "TestKind", + } + gvkfw := newGVKFixupWatcher(expectedGVK, f) + + table := []struct { + t watch.EventType + s runtime.Object + }{ + {watch.Added, newTestType("foo")}, + {watch.Modified, newTestType("qux")}, + {watch.Modified, newTestType("bar")}, + {watch.Deleted, newTestType("bar")}, + {watch.Error, newTestType("error: blah")}, + } + + consumer := func(w watch.Interface) { + for _, expect := range table { + By(fmt.Sprintf("Fixing up watch.EventType: %v and passing it on", expect.t)) + got, ok := <-w.ResultChan() + 
Expect(ok).To(BeTrue(), "closed early") + Expect(expect.t).To(Equal(got.Type), "unexpected Event.Type or out-of-order Event") + Expect(got.Object).To(BeAssignableToTypeOf(&metav1.PartialObjectMetadata{}), "unexpected Event.Object type") + a := got.Object.(*metav1.PartialObjectMetadata) + Expect(got.Object.GetObjectKind().GroupVersionKind()).To(Equal(expectedGVK), "GVK was not fixed up") + expected := expect.s.DeepCopyObject() + expected.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{}) + actual := a.DeepCopyObject() + actual.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{}) + Expect(actual).To(Equal(expected), "unexpected change to the Object") + } + Eventually(w.ResultChan()).Should(BeClosed()) + } + + sender := func() { + f.Add(newTestType("foo")) + f.Action(watch.Modified, newTestType("qux")) + f.Modify(newTestType("bar")) + f.Delete(newTestType("bar")) + f.Error(newTestType("error: blah")) + f.Stop() + } + + go sender() + consumer(gvkfw) + }) +}) diff --git a/pkg/runtime/inject/inject_suite_test.go b/pkg/cache/internal/internal_suite_test.go similarity index 73% rename from pkg/runtime/inject/inject_suite_test.go rename to pkg/cache/internal/internal_suite_test.go index 8193ce209f..25ec0f1dbc 100644 --- a/pkg/runtime/inject/inject_suite_test.go +++ b/pkg/cache/internal/internal_suite_test.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,17 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package inject +package internal import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/envtest" ) func TestSource(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Runtime Injection Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Cache Internal Suite") } diff --git a/pkg/cache/internal/selector.go b/pkg/cache/internal/selector.go new file mode 100644 index 0000000000..c674379b99 --- /dev/null +++ b/pkg/cache/internal/selector.go @@ -0,0 +1,39 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" +) + +// Selector specify the label/field selector to fill in ListOptions. +type Selector struct { + Label labels.Selector + Field fields.Selector +} + +// ApplyToList fill in ListOptions LabelSelector and FieldSelector if needed. 
+func (s Selector) ApplyToList(listOpts *metav1.ListOptions) { + if s.Label != nil { + listOpts.LabelSelector = s.Label.String() + } + if s.Field != nil { + listOpts.FieldSelector = s.Field.String() + } +} diff --git a/pkg/cache/multi_namespace_cache.go b/pkg/cache/multi_namespace_cache.go index fedb426536..d7d7b0e7c2 100644 --- a/pkg/cache/multi_namespace_cache.go +++ b/pkg/cache/multi_namespace_cache.go @@ -22,37 +22,43 @@ import ( "time" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/rest" toolscache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// NewCacheFunc - Function for creating a new cache from the options and a rest config -type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) +// a new global namespaced cache to handle cluster scoped resources. +const globalCache = "_cluster-scope" -// MultiNamespacedCacheBuilder - Builder function to create a new multi-namespaced cache. -// This will scope the cache to a list of namespaces. Listing for all namespaces -// will list for all the namespaces that this knows about. 
-func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { - return func(config *rest.Config, opts Options) (Cache, error) { - opts, err := defaultOpts(config, opts) - if err != nil { - return nil, err - } - caches := map[string]Cache{} - for _, ns := range namespaces { - opts.Namespace = ns - c, err := New(config, opts) - if err != nil { - return nil, err - } - caches[ns] = c - } - return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme}, nil +func newMultiNamespaceCache( + newCache newCacheFunc, + scheme *runtime.Scheme, + restMapper apimeta.RESTMapper, + namespaces map[string]Config, + globalConfig *Config, // may be nil in which case no cache for cluster-scoped objects will be created +) Cache { + // Create every namespace cache. + caches := map[string]Cache{} + for namespace, config := range namespaces { + caches[namespace] = newCache(config, namespace) + } + + // Create a cache for cluster scoped resources if requested + var clusterCache Cache + if globalConfig != nil { + clusterCache = newCache(*globalConfig, corev1.NamespaceAll) + } + + return &multiNamespaceCache{ + namespaceToCache: caches, + Scheme: scheme, + RESTMapper: restMapper, + clusterCache: clusterCache, } } @@ -61,102 +67,226 @@ func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { // operator to a list of namespaces instead of watching every namespace // in the cluster. type multiNamespaceCache struct { - namespaceToCache map[string]Cache Scheme *runtime.Scheme + RESTMapper apimeta.RESTMapper + namespaceToCache map[string]Cache + clusterCache Cache } var _ Cache = &multiNamespaceCache{} -// Methods for multiNamespaceCache to conform to the Informers interface -func (c *multiNamespaceCache) GetInformer(obj runtime.Object) (Informer, error) { - informers := map[string]Informer{} +// Methods for multiNamespaceCache to conform to the Informers interface. 
+ +func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object, opts ...InformerGetOption) (Informer, error) { + // If the object is cluster scoped, get the informer from clusterCache, + // if not use the namespaced caches. + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return nil, err + } + if !isNamespaced { + clusterCacheInformer, err := c.clusterCache.GetInformer(ctx, obj, opts...) + if err != nil { + return nil, err + } + + return &multiNamespaceInformer{ + namespaceToInformer: map[string]Informer{ + globalCache: clusterCacheInformer, + }, + }, nil + } + + namespaceToInformer := map[string]Informer{} for ns, cache := range c.namespaceToCache { - informer, err := cache.GetInformer(obj) + informer, err := cache.GetInformer(ctx, obj, opts...) if err != nil { return nil, err } - informers[ns] = informer + namespaceToInformer[ns] = informer } - return &multiNamespaceInformer{namespaceToInformer: informers}, nil + + return &multiNamespaceInformer{namespaceToInformer: namespaceToInformer}, nil } -func (c *multiNamespaceCache) GetInformerForKind(gvk schema.GroupVersionKind) (Informer, error) { - informers := map[string]Informer{} +func (c *multiNamespaceCache) RemoveInformer(ctx context.Context, obj client.Object) error { + // If the object is clusterscoped, get the informer from clusterCache, + // if not use the namespaced caches. 
+ isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + if !isNamespaced { + return c.clusterCache.RemoveInformer(ctx, obj) + } + + for _, cache := range c.namespaceToCache { + err := cache.RemoveInformer(ctx, obj) + if err != nil { + return err + } + } + + return nil +} + +func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...InformerGetOption) (Informer, error) { + // If the object is cluster scoped, get the informer from clusterCache, + // if not use the namespaced caches. + isNamespaced, err := apiutil.IsGVKNamespaced(gvk, c.RESTMapper) + if err != nil { + return nil, err + } + if !isNamespaced { + clusterCacheInformer, err := c.clusterCache.GetInformerForKind(ctx, gvk, opts...) + if err != nil { + return nil, err + } + + return &multiNamespaceInformer{ + namespaceToInformer: map[string]Informer{ + globalCache: clusterCacheInformer, + }, + }, nil + } + + namespaceToInformer := map[string]Informer{} for ns, cache := range c.namespaceToCache { - informer, err := cache.GetInformerForKind(gvk) + informer, err := cache.GetInformerForKind(ctx, gvk, opts...) 
if err != nil { return nil, err } - informers[ns] = informer + namespaceToInformer[ns] = informer } - return &multiNamespaceInformer{namespaceToInformer: informers}, nil + + return &multiNamespaceInformer{namespaceToInformer: namespaceToInformer}, nil } -func (c *multiNamespaceCache) Start(stopCh <-chan struct{}) error { +func (c *multiNamespaceCache) Start(ctx context.Context) error { + errs := make(chan error) + // start global cache + if c.clusterCache != nil { + go func() { + err := c.clusterCache.Start(ctx) + if err != nil { + errs <- fmt.Errorf("failed to start cluster-scoped cache: %w", err) + } + }() + } + + // start namespaced caches for ns, cache := range c.namespaceToCache { go func(ns string, cache Cache) { - err := cache.Start(stopCh) - if err != nil { - log.Error(err, "multinamespace cache failed to start namespaced informer", "namespace", ns) + if err := cache.Start(ctx); err != nil { + errs <- fmt.Errorf("failed to start cache for namespace %s: %w", ns, err) } }(ns, cache) } - <-stopCh - return nil + select { + case <-ctx.Done(): + return nil + case err := <-errs: + return err + } } -func (c *multiNamespaceCache) WaitForCacheSync(stop <-chan struct{}) bool { +func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool { synced := true for _, cache := range c.namespaceToCache { - if s := cache.WaitForCacheSync(stop); !s { - synced = s + if !cache.WaitForCacheSync(ctx) { + synced = false } } + + // check if cluster scoped cache has synced + if c.clusterCache != nil && !c.clusterCache.WaitForCacheSync(ctx) { + synced = false + } return synced } -func (c *multiNamespaceCache) IndexField(obj runtime.Object, field string, extractValue client.IndexerFunc) error { +func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + + if !isNamespaced { + return 
c.clusterCache.IndexField(ctx, obj, field, extractValue) + } + for _, cache := range c.namespaceToCache { - if err := cache.IndexField(obj, field, extractValue); err != nil { + if err := cache.IndexField(ctx, obj, field, extractValue); err != nil { return err } } return nil } -func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error { +func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + + if !isNamespaced { + // Look into the global cache to fetch the object + return c.clusterCache.Get(ctx, key, obj) + } + cache, ok := c.namespaceToCache[key.Namespace] if !ok { + if global, hasGlobal := c.namespaceToCache[metav1.NamespaceAll]; hasGlobal { + return global.Get(ctx, key, obj, opts...) + } return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", key) } - return cache.Get(ctx, key, obj) + return cache.Get(ctx, key, obj, opts...) } // List multi namespace cache will get all the objects in the namespaces that the cache is watching if asked for all namespaces. -func (c *multiNamespaceCache) List(ctx context.Context, list runtime.Object, opts ...client.ListOptionFunc) error { +func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) + + if listOpts.Continue != "" { + return fmt.Errorf("continue list option is not supported by the cache") + } + + isNamespaced, err := apiutil.IsObjectNamespaced(list, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + + if !isNamespaced { + // Look at the global cache to get the objects with the specified GVK + return c.clusterCache.List(ctx, list, opts...) 
+ } + if listOpts.Namespace != corev1.NamespaceAll { cache, ok := c.namespaceToCache[listOpts.Namespace] if !ok { - return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", listOpts.Namespace) + if global, hasGlobal := c.namespaceToCache[AllNamespaces]; hasGlobal { + return global.List(ctx, list, opts...) + } + return fmt.Errorf("unable to list: %v because of unknown namespace for the cache", listOpts.Namespace) } return cache.List(ctx, list, opts...) } - listAccessor, err := meta.ListAccessor(list) + listAccessor, err := apimeta.ListAccessor(list) if err != nil { return err } - allItems, err := apimeta.ExtractList(list) - if err != nil { - return err - } + allItems := []runtime.Object{} + + limitSet := listOpts.Limit > 0 + var resourceVersion string for _, cache := range c.namespaceToCache { - listObj := list.DeepCopyObject() - err = cache.List(ctx, listObj, opts...) + listObj := list.DeepCopyObject().(client.ObjectList) + err = cache.List(ctx, listObj, &listOpts) if err != nil { return err } @@ -164,41 +294,128 @@ func (c *multiNamespaceCache) List(ctx context.Context, list runtime.Object, opt if err != nil { return err } - accessor, err := meta.ListAccessor(listObj) + accessor, err := apimeta.ListAccessor(listObj) if err != nil { return fmt.Errorf("object: %T must be a list type", list) } allItems = append(allItems, items...) + // The last list call should have the most correct resource version. resourceVersion = accessor.GetResourceVersion() + if limitSet { + // decrement Limit by the number of items + // fetched from the current namespace. + listOpts.Limit -= int64(len(items)) + + // if a Limit was set and the number of + // items read has reached this set limit, + // then stop reading. 
+ if listOpts.Limit == 0 { + break + } + } } listAccessor.SetResourceVersion(resourceVersion) - return apimeta.SetList(list, allItems) + if err := apimeta.SetList(list, allItems); err != nil { + return err + } + + list.SetContinue("continue-not-supported") + return nil } -// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces +// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces. type multiNamespaceInformer struct { namespaceToInformer map[string]Informer } +type handlerRegistration struct { + handles map[string]toolscache.ResourceEventHandlerRegistration +} + +// HasSynced asserts that the handler has been called for the full initial state of the informer. +func (h handlerRegistration) HasSynced() bool { + for _, h := range h.handles { + if !h.HasSynced() { + return false + } + } + return true +} + var _ Informer = &multiNamespaceInformer{} -// AddEventHandler adds the handler to each namespaced informer -func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) { - for _, informer := range i.namespaceToInformer { - informer.AddEventHandler(handler) +// AddEventHandler adds the handler to each informer. 
+func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error) { + handles := handlerRegistration{ + handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)), } + + for ns, informer := range i.namespaceToInformer { + registration, err := informer.AddEventHandler(handler) + if err != nil { + return nil, err + } + handles.handles[ns] = registration + } + + return handles, nil } -// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer -func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) { - for _, informer := range i.namespaceToInformer { - informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) +// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer. +func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) (toolscache.ResourceEventHandlerRegistration, error) { + handles := handlerRegistration{ + handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)), + } + + for ns, informer := range i.namespaceToInformer { + registration, err := informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) + if err != nil { + return nil, err + } + handles.handles[ns] = registration + } + + return handles, nil +} + +// AddEventHandlerWithOptions adds the handler with options to each namespaced informer. 
+func (i *multiNamespaceInformer) AddEventHandlerWithOptions(handler toolscache.ResourceEventHandler, options toolscache.HandlerOptions) (toolscache.ResourceEventHandlerRegistration, error) { + handles := handlerRegistration{ + handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)), + } + + for ns, informer := range i.namespaceToInformer { + registration, err := informer.AddEventHandlerWithOptions(handler, options) + if err != nil { + return nil, err + } + handles.handles[ns] = registration } + + return handles, nil } -// AddIndexers adds the indexer for each namespaced informer +// RemoveEventHandler removes a previously added event handler given by its registration handle. +func (i *multiNamespaceInformer) RemoveEventHandler(h toolscache.ResourceEventHandlerRegistration) error { + handles, ok := h.(handlerRegistration) + if !ok { + return fmt.Errorf("registration is not a registration returned by multiNamespaceInformer") + } + for ns, informer := range i.namespaceToInformer { + registration, ok := handles.handles[ns] + if !ok { + continue + } + if err := informer.RemoveEventHandler(registration); err != nil { + return err + } + } + return nil +} + +// AddIndexers adds the indexers to each informer. func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error { for _, informer := range i.namespaceToInformer { err := informer.AddIndexers(indexers) @@ -209,11 +426,21 @@ func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error return nil } -// HasSynced checks if each namespaced informer has synced +// HasSynced checks if each informer has synced. func (i *multiNamespaceInformer) HasSynced() bool { for _, informer := range i.namespaceToInformer { - if ok := informer.HasSynced(); !ok { - return ok + if !informer.HasSynced() { + return false + } + } + return true +} + +// IsStopped checks if each namespaced informer has stopped, returns false if any are still running. 
+func (i *multiNamespaceInformer) IsStopped() bool { + for _, informer := range i.namespaceToInformer { + if stopped := informer.IsStopped(); !stopped { + return false } } return true diff --git a/pkg/certwatcher/certwatcher.go b/pkg/certwatcher/certwatcher.go new file mode 100644 index 0000000000..2362d020b8 --- /dev/null +++ b/pkg/certwatcher/certwatcher.go @@ -0,0 +1,251 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certwatcher + +import ( + "bytes" + "context" + "crypto/tls" + "fmt" + "os" + "sync" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/go-logr/logr" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("certwatcher") + +const defaultWatchInterval = 10 * time.Second + +// CertWatcher watches certificate and key files for changes. +// It always returns the cached version, +// but periodically reads and parses certificate and key for changes +// and calls an optional callback with the new certificate. 
+type CertWatcher struct { + sync.RWMutex + + currentCert *tls.Certificate + watcher *fsnotify.Watcher + interval time.Duration + log logr.Logger + + certPath string + keyPath string + + cachedKeyPEMBlock []byte + + // callback is a function to be invoked when the certificate changes. + callback func(tls.Certificate) +} + +// New returns a new CertWatcher watching the given certificate and key. +func New(certPath, keyPath string) (*CertWatcher, error) { + var err error + + cw := &CertWatcher{ + certPath: certPath, + keyPath: keyPath, + interval: defaultWatchInterval, + log: log.WithValues("cert", certPath, "key", keyPath), + } + + // Initial read of certificate and key. + if err := cw.ReadCertificate(); err != nil { + return nil, err + } + + cw.watcher, err = fsnotify.NewWatcher() + if err != nil { + return nil, err + } + + return cw, nil +} + +// WithWatchInterval sets the watch interval and returns the CertWatcher pointer +func (cw *CertWatcher) WithWatchInterval(interval time.Duration) *CertWatcher { + cw.interval = interval + return cw +} + +// RegisterCallback registers a callback to be invoked when the certificate changes. +func (cw *CertWatcher) RegisterCallback(callback func(tls.Certificate)) { + cw.Lock() + defer cw.Unlock() + // If the current certificate is not nil, invoke the callback immediately. + if cw.currentCert != nil { + callback(*cw.currentCert) + } + cw.callback = callback +} + +// GetCertificate fetches the currently loaded certificate, which may be nil. +func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { + cw.RLock() + defer cw.RUnlock() + return cw.currentCert, nil +} + +// Start starts the watch on the certificate and key files. 
+func (cw *CertWatcher) Start(ctx context.Context) error { + files := sets.New(cw.certPath, cw.keyPath) + + { + var watchErr error + if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true, func(ctx context.Context) (done bool, err error) { + for _, f := range files.UnsortedList() { + if err := cw.watcher.Add(f); err != nil { + watchErr = err + return false, nil //nolint:nilerr // We want to keep trying. + } + // We've added the watch, remove it from the set. + files.Delete(f) + } + return true, nil + }); err != nil { + return fmt.Errorf("failed to add watches: %w", kerrors.NewAggregate([]error{err, watchErr})) + } + } + + go cw.Watch() + + ticker := time.NewTicker(cw.interval) + defer ticker.Stop() + + cw.log.Info("Starting certificate poll+watcher", "interval", cw.interval) + for { + select { + case <-ctx.Done(): + return cw.watcher.Close() + case <-ticker.C: + if err := cw.ReadCertificate(); err != nil { + cw.log.Error(err, "failed read certificate") + } + } + } +} + +// Watch reads events from the watcher's channel and reacts to changes. +func (cw *CertWatcher) Watch() { + for { + select { + case event, ok := <-cw.watcher.Events: + // Channel is closed. + if !ok { + return + } + + cw.handleEvent(event) + case err, ok := <-cw.watcher.Errors: + // Channel is closed. 
+ if !ok { + return + } + + cw.log.Error(err, "certificate watch error") + } + } +} + +// updateCachedCertificate checks if the new certificate differs from the cache, +// updates it and returns the result if it was updated or not +func (cw *CertWatcher) updateCachedCertificate(cert *tls.Certificate, keyPEMBlock []byte) bool { + cw.Lock() + defer cw.Unlock() + + if cw.currentCert != nil && + bytes.Equal(cw.currentCert.Certificate[0], cert.Certificate[0]) && + bytes.Equal(cw.cachedKeyPEMBlock, keyPEMBlock) { + cw.log.V(7).Info("certificate already cached") + return false + } + cw.currentCert = cert + cw.cachedKeyPEMBlock = keyPEMBlock + return true +} + +// ReadCertificate reads the certificate and key files from disk, parses them, +// and updates the current certificate on the watcher if updated. If a callback is set, it +// is invoked with the new certificate. +func (cw *CertWatcher) ReadCertificate() error { + metrics.ReadCertificateTotal.Inc() + certPEMBlock, err := os.ReadFile(cw.certPath) + if err != nil { + metrics.ReadCertificateErrors.Inc() + return err + } + keyPEMBlock, err := os.ReadFile(cw.keyPath) + if err != nil { + metrics.ReadCertificateErrors.Inc() + return err + } + + cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + metrics.ReadCertificateErrors.Inc() + return err + } + + if !cw.updateCachedCertificate(&cert, keyPEMBlock) { + return nil + } + + cw.log.Info("Updated current TLS certificate") + + // If a callback is registered, invoke it with the new certificate. + cw.RLock() + defer cw.RUnlock() + if cw.callback != nil { + go func() { + cw.callback(cert) + }() + } + return nil +} + +func (cw *CertWatcher) handleEvent(event fsnotify.Event) { + // Only care about events which may modify the contents of the file. 
+ switch { + case event.Op.Has(fsnotify.Write): + case event.Op.Has(fsnotify.Create): + case event.Op.Has(fsnotify.Chmod), event.Op.Has(fsnotify.Remove): + // If the file was removed or renamed, re-add the watch to the previous name + if err := cw.watcher.Add(event.Name); err != nil { + cw.log.Error(err, "error re-watching file") + } + default: + return + } + + cw.log.V(1).Info("certificate event", "event", event) + if err := cw.ReadCertificate(); err != nil { + cw.log.Error(err, "error re-reading certificate") + } +} + +// NeedLeaderElection indicates that the cert-manager +// does not need leader election. +func (cw *CertWatcher) NeedLeaderElection() bool { + return false +} diff --git a/pkg/certwatcher/certwatcher_suite_test.go b/pkg/certwatcher/certwatcher_suite_test.go new file mode 100644 index 0000000000..d0d9a72a62 --- /dev/null +++ b/pkg/certwatcher/certwatcher_suite_test.go @@ -0,0 +1,48 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certwatcher_test + +import ( + "os" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + certPath = "testdata/tls.crt" + keyPath = "testdata/tls.key" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "CertWatcher Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}) + +var _ = AfterSuite(func() { + for _, file := range []string{certPath, keyPath, certPath + ".new", keyPath + ".new", certPath + ".old", keyPath + ".old"} { + _ = os.Remove(file) + } +}) diff --git a/pkg/certwatcher/certwatcher_test.go b/pkg/certwatcher/certwatcher_test.go new file mode 100644 index 0000000000..9737925a6b --- /dev/null +++ b/pkg/certwatcher/certwatcher_test.go @@ -0,0 +1,324 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certwatcher_test + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "net" + "os" + "sync/atomic" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/prometheus/client_golang/prometheus/testutil" + + "sigs.k8s.io/controller-runtime/pkg/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +var _ = Describe("CertWatcher", func() { + var _ = Describe("certwatcher New", func() { + It("should errors without cert/key", func() { + _, err := certwatcher.New("", "") + Expect(err).To(HaveOccurred()) + }) + }) + + var _ = Describe("certwatcher Start", func() { + var ( + ctxCancel context.CancelFunc + watcher *certwatcher.CertWatcher + startWatcher func(interval time.Duration) (done <-chan struct{}) + ) + + BeforeEach(func() { + var ctx context.Context + ctx, ctxCancel = context.WithCancel(context.Background()) //nolint:forbidigo // the watcher outlives the BeforeEach + + err := writeCerts(certPath, keyPath, "127.0.0.1") + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() error { + for _, file := range []string{certPath, keyPath} { + _, err := os.ReadFile(file) + if err != nil { + return err + } + continue + } + + return nil + }).Should(Succeed()) + + watcher, err = certwatcher.New(certPath, keyPath) + Expect(err).ToNot(HaveOccurred()) + + startWatcher = func(interval time.Duration) (done <-chan struct{}) { + doneCh := make(chan struct{}) + go func() { + defer GinkgoRecover() + defer close(doneCh) + Expect(watcher.WithWatchInterval(interval).Start(ctx)).To(Succeed()) + }() + // wait till we read first cert + Eventually(func() error { + err := watcher.ReadCertificate() + return err + }).Should(Succeed()) + return doneCh + } + }) + + It("should not require LeaderElection", func() { + leaderElectionRunnable, isLeaderElectionRunnable := any(watcher).(manager.LeaderElectionRunnable) + Expect(isLeaderElectionRunnable).To(BeTrue()) + Expect(leaderElectionRunnable.NeedLeaderElection()).To(BeFalse()) + }) + + It("should read the initial cert/key", func() { + // This test verifies the initial 
read succeeded. So interval doesn't matter. + doneCh := startWatcher(10 * time.Second) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + + It("should reload currentCert when changed", func() { + // This test verifies fsnotify detects the cert change. So interval doesn't matter. + doneCh := startWatcher(10 * time.Second) + called := atomic.Int64{} + watcher.RegisterCallback(func(crt tls.Certificate) { + called.Add(1) + Expect(crt.Certificate).ToNot(BeEmpty()) + }) + + firstcert, _ := watcher.GetCertificate(nil) + + err := writeCerts(certPath, keyPath, "192.168.0.1") + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() bool { + secondcert, _ := watcher.GetCertificate(nil) + first := firstcert.PrivateKey.(*rsa.PrivateKey) + return first.Equal(secondcert.PrivateKey) || firstcert.Leaf.SerialNumber == secondcert.Leaf.SerialNumber + }).ShouldNot(BeTrue()) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + Expect(called.Load()).To(BeNumerically(">=", 1)) + }) + + It("should reload currentCert when changed with rename", func() { + // This test verifies fsnotify detects the cert change. So interval doesn't matter. 
+ doneCh := startWatcher(10 * time.Second) + called := atomic.Int64{} + watcher.RegisterCallback(func(crt tls.Certificate) { + called.Add(1) + Expect(crt.Certificate).ToNot(BeEmpty()) + }) + + firstcert, _ := watcher.GetCertificate(nil) + + err := writeCerts(certPath+".new", keyPath+".new", "192.168.0.2") + Expect(err).ToNot(HaveOccurred()) + + Expect(os.Link(certPath, certPath+".old")).To(Succeed()) + Expect(os.Rename(certPath+".new", certPath)).To(Succeed()) + + Expect(os.Link(keyPath, keyPath+".old")).To(Succeed()) + Expect(os.Rename(keyPath+".new", keyPath)).To(Succeed()) + + Eventually(func() bool { + secondcert, _ := watcher.GetCertificate(nil) + first := firstcert.PrivateKey.(*rsa.PrivateKey) + return first.Equal(secondcert.PrivateKey) || firstcert.Leaf.SerialNumber == secondcert.Leaf.SerialNumber + }).ShouldNot(BeTrue()) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + Expect(called.Load()).To(BeNumerically(">=", 1)) + }) + + It("should reload currentCert after move out", func() { + // This test verifies poll works, so we'll use 1s as interval (fsnotify doesn't detect this change). 
+ doneCh := startWatcher(1 * time.Second) + called := atomic.Int64{} + watcher.RegisterCallback(func(crt tls.Certificate) { + called.Add(1) + Expect(crt.Certificate).ToNot(BeEmpty()) + }) + + firstcert, _ := watcher.GetCertificate(nil) + + Expect(os.Rename(certPath, certPath+".old")).To(Succeed()) + Expect(os.Rename(keyPath, keyPath+".old")).To(Succeed()) + + err := writeCerts(certPath, keyPath, "192.168.0.3") + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() bool { + secondcert, _ := watcher.GetCertificate(nil) + first := firstcert.PrivateKey.(*rsa.PrivateKey) + return first.Equal(secondcert.PrivateKey) || firstcert.Leaf.SerialNumber == secondcert.Leaf.SerialNumber + }, "10s", "1s").ShouldNot(BeTrue()) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + Expect(called.Load()).To(BeNumerically(">=", 1)) + }) + + Context("prometheus metric read_certificate_total", func() { + var readCertificateTotalBefore float64 + var readCertificateErrorsBefore float64 + + BeforeEach(func() { + readCertificateTotalBefore = testutil.ToFloat64(metrics.ReadCertificateTotal) + readCertificateErrorsBefore = testutil.ToFloat64(metrics.ReadCertificateErrors) + }) + + It("should get updated on successful certificate read", func() { + // This test verifies fsnotify, so interval doesn't matter. + doneCh := startWatcher(10 * time.Second) + + Eventually(func() error { + readCertificateTotalAfter := testutil.ToFloat64(metrics.ReadCertificateTotal) + if readCertificateTotalAfter < readCertificateTotalBefore+1.0 { + return fmt.Errorf("metric read certificate total expected at least: %v and got: %v", readCertificateTotalBefore+1.0, readCertificateTotalAfter) + } + return nil + }, "4s").Should(Succeed()) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + + It("should get updated on read certificate errors", func() { + // This test works with fsnotify, so interval doesn't matter. 
+ doneCh := startWatcher(10 * time.Second) + + Eventually(func() error { + readCertificateTotalAfter := testutil.ToFloat64(metrics.ReadCertificateTotal) + if readCertificateTotalAfter < readCertificateTotalBefore+1.0 { + return fmt.Errorf("metric read certificate total expected at least: %v and got: %v", readCertificateTotalBefore+1.0, readCertificateTotalAfter) + } + readCertificateTotalBefore = readCertificateTotalAfter + return nil + }, "4s").Should(Succeed()) + + Expect(os.Remove(keyPath)).To(Succeed()) + + // Note, we are checking two errors here, because os.Remove generates two fsnotify events: Chmod + Remove + Eventually(func() error { + readCertificateTotalAfter := testutil.ToFloat64(metrics.ReadCertificateTotal) + if readCertificateTotalAfter < readCertificateTotalBefore+2.0 { + return fmt.Errorf("metric read certificate total expected at least: %v and got: %v", readCertificateTotalBefore+2.0, readCertificateTotalAfter) + } + return nil + }, "4s").Should(Succeed()) + Eventually(func() error { + readCertificateErrorsAfter := testutil.ToFloat64(metrics.ReadCertificateErrors) + if readCertificateErrorsAfter < readCertificateErrorsBefore+2.0 { + return fmt.Errorf("metric read certificate errors expected at least: %v and got: %v", readCertificateErrorsBefore+2.0, readCertificateErrorsAfter) + } + return nil + }, "4s").Should(Succeed()) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + }) + }) +}) + +func writeCerts(certPath, keyPath, ip string) error { + var priv interface{} + var err error + priv, err = rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return err + } + + keyUsage := x509.KeyUsageDigitalSignature + if _, isRSA := priv.(*rsa.PrivateKey); isRSA { + keyUsage |= x509.KeyUsageKeyEncipherment + } + + notBefore := time.Now() + notAfter := notBefore.Add(1 * time.Hour) + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return err + } + 
+ template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Kubernetes"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: keyUsage, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + template.IPAddresses = append(template.IPAddresses, net.ParseIP(ip)) + + privkey := priv.(*rsa.PrivateKey) + + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privkey.PublicKey, priv) + if err != nil { + return err + } + + certOut, err := os.Create(certPath) + if err != nil { + return err + } + if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { + return err + } + if err := certOut.Close(); err != nil { + return err + } + + keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + privBytes, err := x509.MarshalPKCS8PrivateKey(priv) + if err != nil { + return err + } + if err := pem.Encode(keyOut, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}); err != nil { + return err + } + return keyOut.Close() +} diff --git a/pkg/runtime/inject/doc.go b/pkg/certwatcher/doc.go similarity index 61% rename from pkg/runtime/inject/doc.go rename to pkg/certwatcher/doc.go index 17c60895f0..40c2fc0bfb 100644 --- a/pkg/runtime/inject/doc.go +++ b/pkg/certwatcher/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,8 +15,9 @@ limitations under the License. */ /* -Package inject defines interfaces and functions for propagating dependencies from a ControllerManager to -the components registered with it. Dependencies are propagated to Reconciler, Source, EventHandler and Predicate -objects which implement the Injectable interfaces. 
+Package certwatcher is a helper for reloading Certificates from disk to be used
+with tls servers. It provides a helper func `GetCertificate` which can be
+called from `tls.Config` and passed into your tls.Listener. For a detailed
+example server, see pkg/webhook/server.go.
 */
-package inject
+package certwatcher
diff --git a/pkg/certwatcher/example_test.go b/pkg/certwatcher/example_test.go
new file mode 100644
index 0000000000..e85b2403cb
--- /dev/null
+++ b/pkg/certwatcher/example_test.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package certwatcher_test + +import ( + "context" + "crypto/tls" + "net/http" + "time" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/certwatcher" +) + +type sampleServer struct { +} + +func Example() { + // Setup Context + ctx := ctrl.SetupSignalHandler() + + // Initialize a new cert watcher with cert/key pair + watcher, err := certwatcher.New("ssl/tls.crt", "ssl/tls.key") + if err != nil { + panic(err) + } + + // Start goroutine with certwatcher running against supplied cert + go func() { + if err := watcher.Start(ctx); err != nil { + panic(err) + } + }() + + // Setup TLS listener using GetCertficate for fetching the cert when changes + listener, err := tls.Listen("tcp", "localhost:9443", &tls.Config{ + GetCertificate: watcher.GetCertificate, + MinVersion: tls.VersionTLS12, + }) + if err != nil { + panic(err) + } + + // Initialize your tls server + srv := &http.Server{ + Handler: &sampleServer{}, + ReadHeaderTimeout: 5 * time.Second, + } + + // Start goroutine for handling server shutdown. + go func() { + <-ctx.Done() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := srv.Shutdown(ctx); err != nil { + panic(err) + } + }() + + // Serve t + if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed { + panic(err) + } +} + +func (s *sampleServer) ServeHTTP(http.ResponseWriter, *http.Request) { +} diff --git a/pkg/certwatcher/metrics/metrics.go b/pkg/certwatcher/metrics/metrics.go new file mode 100644 index 0000000000..f128abbcf0 --- /dev/null +++ b/pkg/certwatcher/metrics/metrics.go @@ -0,0 +1,46 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // ReadCertificateTotal is a prometheus counter metrics which holds the total + // number of certificate reads. + ReadCertificateTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "certwatcher_read_certificate_total", + Help: "Total number of certificate reads", + }) + + // ReadCertificateErrors is a prometheus counter metrics which holds the total + // number of errors from certificate read. + ReadCertificateErrors = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "certwatcher_read_certificate_errors_total", + Help: "Total number of certificate read errors", + }) +) + +func init() { + metrics.Registry.MustRegister( + ReadCertificateTotal, + ReadCertificateErrors, + ) +} diff --git a/pkg/certwatcher/testdata/.gitkeep b/pkg/certwatcher/testdata/.gitkeep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pkg/client/apiutil/apimachinery.go b/pkg/client/apiutil/apimachinery.go index cf775c3dc8..b132cb2d4d 100644 --- a/pkg/client/apiutil/apimachinery.go +++ b/pkg/client/apiutil/apimachinery.go @@ -20,64 +20,168 @@ limitations under the License. 
package apiutil import ( + "errors" "fmt" + "net/http" + "reflect" + "sync" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" ) -// NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery -// information fetched by a new client with the given config. -func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { - // Get a mapper - dc := discovery.NewDiscoveryClientForConfigOrDie(c) - gr, err := restmapper.GetAPIGroupResources(dc) +var ( + protobufScheme = runtime.NewScheme() + protobufSchemeLock sync.RWMutex +) + +func init() { + // Currently only enabled for built-in resources which are guaranteed to implement Protocol Buffers. + // For custom resources, CRDs can not support Protocol Buffers but Aggregated API can. + // See doc: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility + if err := clientgoscheme.AddToScheme(protobufScheme); err != nil { + panic(err) + } +} + +// AddToProtobufScheme add the given SchemeBuilder into protobufScheme, which should +// be additional types that do support protobuf. +func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error { + protobufSchemeLock.Lock() + defer protobufSchemeLock.Unlock() + return addToScheme(protobufScheme) +} + +// IsObjectNamespaced returns true if the object is namespace scoped. +// For unstructured objects the gvk is found from the object itself. 
+func IsObjectNamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper meta.RESTMapper) (bool, error) { + gvk, err := GVKForObject(obj, scheme) + if err != nil { + return false, err + } + + return IsGVKNamespaced(gvk, restmapper) +} + +// IsGVKNamespaced returns true if the object having the provided +// GVK is namespace scoped. +func IsGVKNamespaced(gvk schema.GroupVersionKind, restmapper meta.RESTMapper) (bool, error) { + // Fetch the RESTMapping using the complete GVK. If we exclude the Version, the Version set + // will be populated using the cached Group if available. This can lead to failures updating + // the cache with new Versions of CRDs registered at runtime. + restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version) if err != nil { - return nil, err + return false, fmt.Errorf("failed to get restmapping: %w", err) + } + + scope := restmapping.Scope.Name() + if scope == "" { + return false, errors.New("scope cannot be identified, empty scope returned") + } + + if scope != meta.RESTScopeNameRoot { + return true, nil } - return restmapper.NewDiscoveryRESTMapper(gr), nil + return false, nil } // GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { + // TODO(directxman12): do we want to generalize this to arbitrary container types? + // I think we'd need a generalized form of scheme or something. 
It's a + // shame there's not a reliable "GetGVK" interface that works by default + // for unpopulated static types and populated "dynamic" types + // (unstructured, partial, etc) + + // check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds + _, isPartial := obj.(*metav1.PartialObjectMetadata) + _, isPartialList := obj.(*metav1.PartialObjectMetadataList) + if isPartial || isPartialList { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingVersionErr("unstructured object has no version") + } + return gvk, nil + } + + // Use the given scheme to retrieve all the GVKs for the object. gvks, isUnversioned, err := scheme.ObjectKinds(obj) if err != nil { return schema.GroupVersionKind{}, err } if isUnversioned { - return schema.GroupVersionKind{}, fmt.Errorf("cannot create a new informer for the unversioned type %T", obj) + return schema.GroupVersionKind{}, fmt.Errorf("cannot create group-version-kind for unversioned type %T", obj) } - if len(gvks) < 1 { - return schema.GroupVersionKind{}, fmt.Errorf("no group-version-kinds associated with type %T", obj) - } - if len(gvks) > 1 { - // this should only trigger for things like metav1.XYZ -- - // normal versioned types should be fine + switch { + case len(gvks) < 1: + // If the object has no GVK, the object might not have been registered with the scheme. + // or it's not a valid object. 
+ return schema.GroupVersionKind{}, fmt.Errorf("no GroupVersionKind associated with Go type %T, was the type registered with the Scheme?", obj) + case len(gvks) > 1: + err := fmt.Errorf("multiple GroupVersionKinds associated with Go type %T within the Scheme, this can happen when a type is registered for multiple GVKs at the same time", obj) + + // We've found multiple GVKs for the object. + currentGVK := obj.GetObjectKind().GroupVersionKind() + if !currentGVK.Empty() { + // If the base object has a GVK, check if it's in the list of GVKs before using it. + for _, gvk := range gvks { + if gvk == currentGVK { + return gvk, nil + } + } + + return schema.GroupVersionKind{}, fmt.Errorf( + "%w: the object's supplied GroupVersionKind %q was not found in the Scheme's list; refusing to guess at one: %q", err, currentGVK, gvks) + } + + // This should only trigger for things like metav1.XYZ -- + // normal versioned types should be fine. + // + // See https://github.com/kubernetes-sigs/controller-runtime/issues/362 + // for more information. return schema.GroupVersionKind{}, fmt.Errorf( - "multiple group-version-kinds associated with type %T, refusing to guess at one", obj) + "%w: callers can either fix their type registration to only register it once, or specify the GroupVersionKind to use for object passed in; refusing to guess at one: %q", err, gvks) + default: + // In any other case, we've found a single GVK for the object. + return gvks[0], nil } - return gvks[0], nil } // RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated // with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from // baseConfig, if set, otherwise a default serializer will be set. 
-func RESTClientForGVK(gvk schema.GroupVersionKind, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { - cfg := createRestConfig(gvk, baseConfig) - if cfg.NegotiatedSerializer == nil { - cfg.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: codecs} +func RESTClientForGVK( + gvk schema.GroupVersionKind, + forceDisableProtoBuf bool, + isUnstructured bool, + baseConfig *rest.Config, + codecs serializer.CodecFactory, + httpClient *http.Client, +) (rest.Interface, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") } - return rest.RESTClientFor(cfg) + return rest.RESTClientForConfigAndClient(createRestConfig(gvk, forceDisableProtoBuf, isUnstructured, baseConfig, codecs), httpClient) } -//createRestConfig copies the base config and updates needed fields for a new rest config -func createRestConfig(gvk schema.GroupVersionKind, baseConfig *rest.Config) *rest.Config { +// createRestConfig copies the base config and updates needed fields for a new rest config. +func createRestConfig(gvk schema.GroupVersionKind, + forceDisableProtoBuf bool, + isUnstructured bool, + baseConfig *rest.Config, + codecs serializer.CodecFactory, +) *rest.Config { gv := gvk.GroupVersion() cfg := rest.CopyConfig(baseConfig) @@ -90,5 +194,47 @@ func createRestConfig(gvk schema.GroupVersionKind, baseConfig *rest.Config) *res if cfg.UserAgent == "" { cfg.UserAgent = rest.DefaultKubernetesUserAgent() } + // TODO(FillZpp): In the long run, we want to check discovery or something to make sure that this is actually true. + if cfg.ContentType == "" && !forceDisableProtoBuf { + protobufSchemeLock.RLock() + if protobufScheme.Recognizes(gvk) { + cfg.ContentType = runtime.ContentTypeProtobuf + } + protobufSchemeLock.RUnlock() + } + + if isUnstructured { + // If the object is unstructured, we use the client-go dynamic serializer. 
+ cfg = dynamic.ConfigFor(cfg) + } else { + cfg.NegotiatedSerializer = serializerWithTargetZeroingDecode{NegotiatedSerializer: serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + } + return cfg } + +type serializerWithTargetZeroingDecode struct { + runtime.NegotiatedSerializer +} + +func (s serializerWithTargetZeroingDecode) DecoderToVersion(serializer runtime.Decoder, r runtime.GroupVersioner) runtime.Decoder { + return targetZeroingDecoder{upstream: s.NegotiatedSerializer.DecoderToVersion(serializer, r)} +} + +type targetZeroingDecoder struct { + upstream runtime.Decoder +} + +func (t targetZeroingDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + zero(into) + return t.upstream.Decode(data, defaults, into) +} + +// zero zeros the value of a pointer. +func zero(x interface{}) { + if x == nil { + return + } + res := reflect.ValueOf(x).Elem() + res.Set(reflect.Zero(res.Type())) +} diff --git a/pkg/client/apiutil/apimachinery_test.go b/pkg/client/apiutil/apimachinery_test.go new file mode 100644 index 0000000000..122c5cc542 --- /dev/null +++ b/pkg/client/apiutil/apimachinery_test.go @@ -0,0 +1,191 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apiutil_test + +import ( + "context" + "strconv" + "testing" + + gmg "github.com/onsi/gomega" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +func TestApiMachinery(t *testing.T) { + for _, aggregatedDiscovery := range []bool{true, false} { + t.Run("aggregatedDiscovery="+strconv.FormatBool(aggregatedDiscovery), func(t *testing.T) { + restCfg := setupEnvtest(t, !aggregatedDiscovery) + + // Details of the GVK registered at initialization. + initialGvk := metav1.GroupVersionKind{ + Group: "crew.example.com", + Version: "v1", + Kind: "Driver", + } + + // A set of GVKs to register at runtime with varying properties. + runtimeGvks := []struct { + name string + gvk metav1.GroupVersionKind + plural string + }{ + { + name: "new Kind and Version added to existing Group", + gvk: metav1.GroupVersionKind{ + Group: "crew.example.com", + Version: "v1alpha1", + Kind: "Passenger", + }, + plural: "passengers", + }, + { + name: "new Kind added to existing Group and Version", + gvk: metav1.GroupVersionKind{ + Group: "crew.example.com", + Version: "v1", + Kind: "Garage", + }, + plural: "garages", + }, + { + name: "new GVK", + gvk: metav1.GroupVersionKind{ + Group: "inventory.example.com", + Version: "v1", + Kind: "Taxi", + }, + plural: "taxis", + }, + } + + t.Run("IsGVKNamespaced should report scope for GVK registered at initialization", func(t *testing.T) { + g := gmg.NewWithT(t) + + httpClient, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + lazyRestMapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + s := scheme.Scheme + err = 
apiextensionsv1.AddToScheme(s) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + // Query the scope of a GVK that was registered at initialization. + scope, err := apiutil.IsGVKNamespaced( + schema.GroupVersionKind(initialGvk), + lazyRestMapper, + ) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(scope).To(gmg.BeTrue()) + }) + + for _, runtimeGvk := range runtimeGvks { + t.Run("IsGVKNamespaced should report scope for "+runtimeGvk.name, func(t *testing.T) { + g := gmg.NewWithT(t) + + httpClient, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + lazyRestMapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + s := scheme.Scheme + err = apiextensionsv1.AddToScheme(s) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + c, err := client.New(restCfg, client.Options{Scheme: s}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + // Run a valid query to initialize cache. + scope, err := apiutil.IsGVKNamespaced( + schema.GroupVersionKind(initialGvk), + lazyRestMapper, + ) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(scope).To(gmg.BeTrue()) + + // Register a new CRD at runtime. + crd := newCRD(t.Context(), g, c, runtimeGvk.gvk.Group, runtimeGvk.gvk.Kind, runtimeGvk.plural) + version := crd.Spec.Versions[0] + version.Name = runtimeGvk.gvk.Version + version.Storage = true + version.Served = true + crd.Spec.Versions = []apiextensionsv1.CustomResourceDefinitionVersion{version} + crd.Spec.Scope = apiextensionsv1.NamespaceScoped + + g.Expect(c.Create(t.Context(), crd)).To(gmg.Succeed()) + t.Cleanup(func() { + g.Expect(c.Delete(context.Background(), crd)).To(gmg.Succeed()) //nolint:forbidigo //t.Context is cancelled in t.Cleanup + }) + + // Wait until the CRD is registered. 
+ g.Eventually(func(g gmg.Gomega) { + isRegistered, err := isCrdRegistered(restCfg, runtimeGvk.gvk) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(isRegistered).To(gmg.BeTrue()) + }).Should(gmg.Succeed(), "GVK should be available") + + // Query the scope of the GVK registered at runtime. + scope, err = apiutil.IsGVKNamespaced( + schema.GroupVersionKind(runtimeGvk.gvk), + lazyRestMapper, + ) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(scope).To(gmg.BeTrue()) + }) + } + }) + } +} + +// Check if a slice of APIResource contains a given Kind. +func kindInAPIResources(resources *metav1.APIResourceList, kind string) bool { + for _, res := range resources.APIResources { + if res.Kind == kind { + return true + } + } + return false +} + +// Check if a CRD has registered with the API server using a DiscoveryClient. +func isCrdRegistered(cfg *rest.Config, gvk metav1.GroupVersionKind) (bool, error) { + discHTTP, err := rest.HTTPClientFor(cfg) + if err != nil { + return false, err + } + + discClient, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, discHTTP) + if err != nil { + return false, err + } + + resources, err := discClient.ServerResourcesForGroupVersion(gvk.Group + "/" + gvk.Version) + if err != nil { + return false, err + } + + return kindInAPIResources(resources, gvk.Kind), nil +} diff --git a/pkg/client/apiutil/apiutil_suite_test.go b/pkg/client/apiutil/apiutil_suite_test.go new file mode 100644 index 0000000000..7fe960b917 --- /dev/null +++ b/pkg/client/apiutil/apiutil_suite_test.go @@ -0,0 +1,42 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/client-go/rest" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "API Utilities Test Suite") +} + +var cfg *rest.Config + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + // for things that technically need a rest.Config for defaulting, but don't actually use them + cfg = &rest.Config{} +}) diff --git a/pkg/client/apiutil/errors.go b/pkg/client/apiutil/errors.go new file mode 100644 index 0000000000..c216c49d2a --- /dev/null +++ b/pkg/client/apiutil/errors.go @@ -0,0 +1,54 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apiutil + +import ( + "fmt" + "sort" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ErrResourceDiscoveryFailed is returned if the RESTMapper cannot discover supported resources for some GroupVersions. +// It wraps the errors encountered, except "NotFound" errors are replaced with meta.NoResourceMatchError, for +// backwards compatibility with code that uses meta.IsNoMatchError() to check for unsupported APIs. +type ErrResourceDiscoveryFailed map[schema.GroupVersion]error + +// Error implements the error interface. +func (e *ErrResourceDiscoveryFailed) Error() string { + subErrors := []string{} + for k, v := range *e { + subErrors = append(subErrors, fmt.Sprintf("%s: %v", k, v)) + } + sort.Strings(subErrors) + return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(subErrors, ", ")) +} + +func (e *ErrResourceDiscoveryFailed) Unwrap() []error { + subErrors := []error{} + for gv, err := range *e { + if apierrors.IsNotFound(err) { + err = &meta.NoResourceMatchError{PartialResource: gv.WithResource("")} + } + subErrors = append(subErrors, err) + } + return subErrors +} diff --git a/pkg/client/apiutil/restmapper.go b/pkg/client/apiutil/restmapper.go new file mode 100644 index 0000000000..7a7a0d1145 --- /dev/null +++ b/pkg/client/apiutil/restmapper.go @@ -0,0 +1,372 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "fmt" + "net/http" + "sync" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "k8s.io/utils/ptr" +) + +// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. +func NewDynamicRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + + client, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, httpClient) + if err != nil { + return nil, err + } + + return &mapper{ + mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), + client: client, + knownGroups: map[string]*restmapper.APIGroupResources{}, + apiGroups: map[string]*metav1.APIGroup{}, + }, nil +} + +// mapper is a RESTMapper that will lazily query the provided +// client for discovery information to do REST mappings. +type mapper struct { + mapper meta.RESTMapper + client discovery.AggregatedDiscoveryInterface + knownGroups map[string]*restmapper.APIGroupResources + apiGroups map[string]*metav1.APIGroup + + initialDiscoveryDone bool + + // mutex to provide thread-safe mapper reloading. + // It protects all fields in the mapper as well as methods + // that have the `Locked` suffix. + mu sync.RWMutex +} + +// KindFor implements Mapper.KindFor. 
+func (m *mapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + res, err := m.getMapper().KindFor(resource) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return schema.GroupVersionKind{}, err + } + res, err = m.getMapper().KindFor(resource) + } + + return res, err +} + +// KindsFor implements Mapper.KindsFor. +func (m *mapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + res, err := m.getMapper().KindsFor(resource) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return nil, err + } + res, err = m.getMapper().KindsFor(resource) + } + + return res, err +} + +// ResourceFor implements Mapper.ResourceFor. +func (m *mapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourceFor(input) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return schema.GroupVersionResource{}, err + } + res, err = m.getMapper().ResourceFor(input) + } + + return res, err +} + +// ResourcesFor implements Mapper.ResourcesFor. +func (m *mapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourcesFor(input) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return nil, err + } + res, err = m.getMapper().ResourcesFor(input) + } + + return res, err +} + +// RESTMapping implements Mapper.RESTMapping. +func (m *mapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMapping(gk, versions...) 
+ if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err + } + res, err = m.getMapper().RESTMapping(gk, versions...) + } + + return res, err +} + +// RESTMappings implements Mapper.RESTMappings. +func (m *mapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMappings(gk, versions...) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err + } + res, err = m.getMapper().RESTMappings(gk, versions...) + } + + return res, err +} + +// ResourceSingularizer implements Mapper.ResourceSingularizer. +func (m *mapper) ResourceSingularizer(resource string) (string, error) { + return m.getMapper().ResourceSingularizer(resource) +} + +func (m *mapper) getMapper() meta.RESTMapper { + m.mu.RLock() + defer m.mu.RUnlock() + return m.mapper +} + +// addKnownGroupAndReload reloads the mapper with updated information about missing API group. +// versions can be specified for partial updates, for instance for v1beta1 version only. +func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) error { + // versions will here be [""] if the forwarded Version value of + // GroupVersionResource (in calling method) was not specified. + if len(versions) == 1 && versions[0] == "" { + versions = nil + } + + m.mu.Lock() + defer m.mu.Unlock() + // If no specific versions are set by user, we will scan all available ones for the API group. + // This operation requires 2 requests: /api and /apis, but only once. For all subsequent calls + // this data will be taken from cache. + // + // We always run this once, because if the server supports aggregated discovery, this will + // load everything with two api calls which we assume is overall cheaper. 
+ if len(versions) == 0 || !m.initialDiscoveryDone { + apiGroup, didAggregatedDiscovery, err := m.findAPIGroupByNameAndMaybeAggregatedDiscoveryLocked(groupName) + if err != nil { + return err + } + if apiGroup != nil && len(versions) == 0 { + for _, version := range apiGroup.Versions { + versions = append(versions, version.Version) + } + } + + // No need to do anything further if aggregatedDiscovery is supported and we did a lookup + if didAggregatedDiscovery { + failedGroups := make(map[schema.GroupVersion]error) + for _, version := range versions { + if m.knownGroups[groupName] == nil || m.knownGroups[groupName].VersionedResources[version] == nil { + failedGroups[schema.GroupVersion{Group: groupName, Version: version}] = &meta.NoResourceMatchError{ + PartialResource: schema.GroupVersionResource{ + Group: groupName, + Version: version, + }} + } + } + if len(failedGroups) > 0 { + return ptr.To(ErrResourceDiscoveryFailed(failedGroups)) + } + return nil + } + } + + // Update information for group resources about versioned resources. + // The number of API calls is equal to the number of versions: /apis//. + // If we encounter a missing API version (NotFound error), we will remove the group from + // the m.apiGroups and m.knownGroups caches. + // If this happens, in the next call the group will be added back to apiGroups + // and only the existing versions will be loaded in knownGroups. + groupVersionResources, err := m.fetchGroupVersionResourcesLocked(groupName, versions...) + if err != nil { + return fmt.Errorf("failed to get API group resources: %w", err) + } + + m.addGroupVersionResourcesToCacheAndReloadLocked(groupVersionResources) + return nil +} + +// addGroupVersionResourcesToCacheAndReloadLocked does what the name suggests. The mutex must be held when +// calling it. 
+func (m *mapper) addGroupVersionResourcesToCacheAndReloadLocked(gvr map[schema.GroupVersion]*metav1.APIResourceList) {
+	// Update information for group resources about the API group by adding new versions.
+	// Ignore the versions that are already registered
+	for groupVersion, resources := range gvr {
+		var groupResources *restmapper.APIGroupResources
+		if _, ok := m.knownGroups[groupVersion.Group]; ok {
+			groupResources = m.knownGroups[groupVersion.Group]
+		} else {
+			groupResources = &restmapper.APIGroupResources{
+				Group:              metav1.APIGroup{Name: groupVersion.Group},
+				VersionedResources: make(map[string][]metav1.APIResource),
+			}
+		}
+
+		version := groupVersion.Version
+
+		groupResources.VersionedResources[version] = resources.APIResources
+		found := false
+		for _, v := range groupResources.Group.Versions {
+			if v.Version == version {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			gv := metav1.GroupVersionForDiscovery{
+				GroupVersion: metav1.GroupVersion{Group: groupVersion.Group, Version: version}.String(),
+				Version:      version,
+			}
+
+			// Prepend if preferred version, else append. The upstream DiscoveryRESTMapper assumes
+			// the first version is the preferred one: https://github.com/kubernetes/kubernetes/blob/ef54ac803b712137871c1a1f8d635d50e69ffa6c/staging/src/k8s.io/apimachinery/pkg/api/meta/restmapper.go#L458-L461
+			if group, ok := m.apiGroups[groupVersion.Group]; ok && group.PreferredVersion.Version == version {
+				groupResources.Group.Versions = append([]metav1.GroupVersionForDiscovery{gv}, groupResources.Group.Versions...)
+			} else {
+				groupResources.Group.Versions = append(groupResources.Group.Versions, gv)
+			}
+		}
+
+		// Update data in the cache.
+		m.knownGroups[groupVersion.Group] = groupResources
+	}
+
+	// Finally, reload the mapper.
+ updatedGroupResources := make([]*restmapper.APIGroupResources, 0, len(m.knownGroups)) + for _, agr := range m.knownGroups { + updatedGroupResources = append(updatedGroupResources, agr) + } + + m.mapper = restmapper.NewDiscoveryRESTMapper(updatedGroupResources) +} + +// findAPIGroupByNameAndMaybeAggregatedDiscoveryLocked tries to find the passed apiGroup. +// If the server supports aggregated discovery, it will always perform that. +func (m *mapper) findAPIGroupByNameAndMaybeAggregatedDiscoveryLocked(groupName string) (_ *metav1.APIGroup, didAggregatedDiscovery bool, _ error) { + // Looking in the cache first + group, ok := m.apiGroups[groupName] + if ok { + return group, false, nil + } + + // Update the cache if nothing was found. + apiGroups, maybeResources, _, err := m.client.GroupsAndMaybeResources() + if err != nil { + return nil, false, fmt.Errorf("failed to get server groups: %w", err) + } + if len(apiGroups.Groups) == 0 { + return nil, false, fmt.Errorf("received an empty API groups list") + } + + m.initialDiscoveryDone = true + for i := range apiGroups.Groups { + group := &apiGroups.Groups[i] + m.apiGroups[group.Name] = group + } + if len(maybeResources) > 0 { + didAggregatedDiscovery = true + m.addGroupVersionResourcesToCacheAndReloadLocked(maybeResources) + } + + // Looking in the cache again. + // Don't return an error here if the API group is not present. + // The reloaded RESTMapper will take care of returning a NoMatchError. + return m.apiGroups[groupName], didAggregatedDiscovery, nil +} + +// fetchGroupVersionResourcesLocked fetches the resources for the specified group and its versions. +// This method might modify the cache so it needs to be called under the lock. 
+func (m *mapper) fetchGroupVersionResourcesLocked(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { + groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) + failedGroups := make(map[schema.GroupVersion]error) + + for _, version := range versions { + groupVersion := schema.GroupVersion{Group: groupName, Version: version} + + apiResourceList, err := m.client.ServerResourcesForGroupVersion(groupVersion.String()) + if apierrors.IsNotFound(err) { + // If the version is not found, we remove the group from the cache + // so it gets refreshed on the next call. + if m.isAPIGroupCachedLocked(groupVersion) { + delete(m.apiGroups, groupName) + } + if m.isGroupVersionCachedLocked(groupVersion) { + delete(m.knownGroups, groupName) + } + continue + } else if err != nil { + failedGroups[groupVersion] = err + } + + if apiResourceList != nil { + // even in case of error, some fallback might have been returned. + groupVersionResources[groupVersion] = apiResourceList + } + } + + if len(failedGroups) > 0 { + err := ErrResourceDiscoveryFailed(failedGroups) + return nil, &err + } + + return groupVersionResources, nil +} + +// isGroupVersionCachedLocked checks if a version for a group is cached in the known groups cache. +func (m *mapper) isGroupVersionCachedLocked(gv schema.GroupVersion) bool { + if cachedGroup, ok := m.knownGroups[gv.Group]; ok { + _, cached := cachedGroup.VersionedResources[gv.Version] + return cached + } + + return false +} + +// isAPIGroupCachedLocked checks if a version for a group is cached in the api groups cache. 
+func (m *mapper) isAPIGroupCachedLocked(gv schema.GroupVersion) bool { + cachedGroup, ok := m.apiGroups[gv.Group] + if !ok { + return false + } + + for _, version := range cachedGroup.Versions { + if version.Version == gv.Version { + return true + } + } + + return false +} diff --git a/pkg/client/apiutil/restmapper_test.go b/pkg/client/apiutil/restmapper_test.go new file mode 100644 index 0000000000..51807f12de --- /dev/null +++ b/pkg/client/apiutil/restmapper_test.go @@ -0,0 +1,833 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil_test + +import ( + "context" + "fmt" + "net/http" + "strconv" + "sync" + "testing" + + _ "github.com/onsi/ginkgo/v2" + gmg "github.com/onsi/gomega" + "github.com/onsi/gomega/format" + gomegatypes "github.com/onsi/gomega/types" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/envtest" +) + +// countingRoundTripper is used to count HTTP requests. 
+type countingRoundTripper struct { + roundTripper http.RoundTripper + requestCount int +} + +func newCountingRoundTripper(rt http.RoundTripper) *countingRoundTripper { + return &countingRoundTripper{roundTripper: rt} +} + +// RoundTrip implements http.RoundTripper.RoundTrip that additionally counts requests. +func (crt *countingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + crt.requestCount++ + + return crt.roundTripper.RoundTrip(r) +} + +// GetRequestCount returns how many requests have been made. +func (crt *countingRoundTripper) GetRequestCount() int { + return crt.requestCount +} + +// Reset sets the counter to 0. +func (crt *countingRoundTripper) Reset() { + crt.requestCount = 0 +} + +func setupEnvtest(t *testing.T, disableAggregatedDiscovery bool) *rest.Config { + t.Log("Setup envtest") + + g := gmg.NewWithT(t) + testEnv := &envtest.Environment{ + CRDDirectoryPaths: []string{"testdata"}, + } + if disableAggregatedDiscovery { + testEnv.DownloadBinaryAssets = true + testEnv.DownloadBinaryAssetsVersion = "v1.28.0" + binaryAssetsDirectory, err := envtest.SetupEnvtestDefaultBinaryAssetsDirectory() + g.Expect(err).ToNot(gmg.HaveOccurred()) + testEnv.BinaryAssetsDirectory = binaryAssetsDirectory + testEnv.ControlPlane.GetAPIServer().Configure().Append("feature-gates", "AggregatedDiscoveryEndpoint=false") + } + + cfg, err := testEnv.Start() + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(cfg).NotTo(gmg.BeNil()) + + t.Cleanup(func() { + t.Log("Stop envtest") + g.Expect(testEnv.Stop()).To(gmg.Succeed()) + }) + + return cfg +} + +func TestLazyRestMapperProvider(t *testing.T) { + for _, aggregatedDiscovery := range []bool{true, false} { + t.Run("aggregatedDiscovery="+strconv.FormatBool(aggregatedDiscovery), func(t *testing.T) { + restCfg := setupEnvtest(t, !aggregatedDiscovery) + + t.Run("LazyRESTMapper should fetch data based on the request", func(t *testing.T) { + g := gmg.NewWithT(t) + + // For each new group it performs just one request to 
the API server: + // GET https://host/apis// + + httpClient, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + crt := newCountingRoundTripper(httpClient.Transport) + httpClient.Transport = crt + + lazyRestMapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + // There are no requests before any call + g.Expect(crt.GetRequestCount()).To(gmg.Equal(0)) + + mapping, err := lazyRestMapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "deployment"}, "v1") + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("deployment")) + expectedAPIRequestCount := 3 + if aggregatedDiscovery { + expectedAPIRequestCount = 2 + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + mappings, err := lazyRestMapper.RESTMappings(schema.GroupKind{Group: "", Kind: "pod"}, "v1") + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mappings).To(gmg.HaveLen(1)) + g.Expect(mappings[0].GroupVersionKind.Kind).To(gmg.Equal("pod")) + if !aggregatedDiscovery { + expectedAPIRequestCount++ + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + kind, err := lazyRestMapper.KindFor(schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "ingresses"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(kind.Kind).To(gmg.Equal("Ingress")) + if !aggregatedDiscovery { + expectedAPIRequestCount++ + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + kinds, err := lazyRestMapper.KindsFor(schema.GroupVersionResource{Group: "authentication.k8s.io", Version: "v1", Resource: "tokenreviews"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(kinds).To(gmg.HaveLen(1)) + g.Expect(kinds[0].Kind).To(gmg.Equal("TokenReview")) + if !aggregatedDiscovery { + expectedAPIRequestCount++ + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + resource, err := 
lazyRestMapper.ResourceFor(schema.GroupVersionResource{Group: "scheduling.k8s.io", Version: "v1", Resource: "priorityclasses"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(resource.Resource).To(gmg.Equal("priorityclasses")) + if !aggregatedDiscovery { + expectedAPIRequestCount++ + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + resources, err := lazyRestMapper.ResourcesFor(schema.GroupVersionResource{Group: "policy", Version: "v1", Resource: "poddisruptionbudgets"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(resources).To(gmg.HaveLen(1)) + g.Expect(resources[0].Resource).To(gmg.Equal("poddisruptionbudgets")) + if !aggregatedDiscovery { + expectedAPIRequestCount++ + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + }) + + t.Run("LazyRESTMapper should cache fetched data and doesn't perform any additional requests", func(t *testing.T) { + g := gmg.NewWithT(t) + + httpClient, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + crt := newCountingRoundTripper(httpClient.Transport) + httpClient.Transport = crt + + lazyRestMapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + g.Expect(crt.GetRequestCount()).To(gmg.Equal(0)) + + mapping, err := lazyRestMapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "deployment"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("deployment")) + expectedAPIRequestCount := 3 + if aggregatedDiscovery { + expectedAPIRequestCount = 2 + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + // Data taken from cache - there are no more additional requests. 
+ + mapping, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "deployment"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("deployment")) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + kind, err := lazyRestMapper.KindFor((schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployment"})) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(kind.Kind).To(gmg.Equal("Deployment")) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + resource, err := lazyRestMapper.ResourceFor((schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployment"})) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(resource.Resource).To(gmg.Equal("deployments")) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + }) + + t.Run("LazyRESTMapper should work correctly with empty versions list", func(t *testing.T) { + g := gmg.NewWithT(t) + + httpClient, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + crt := newCountingRoundTripper(httpClient.Transport) + httpClient.Transport = crt + + lazyRestMapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + g.Expect(crt.GetRequestCount()).To(gmg.Equal(0)) + + // crew.example.com has 2 versions: v1 and v2 + + // If no versions were provided by user, we fetch all of them. + // Here we expect 4 calls. 
+ // To initialize: + // #1: GET https://host/api + // #2: GET https://host/apis + // Then, for each version it performs one request to the API server: + // #3: GET https://host/apis/crew.example.com/v1 + // #4: GET https://host/apis/crew.example.com/v2 + mapping, err := lazyRestMapper.RESTMapping(schema.GroupKind{Group: "crew.example.com", Kind: "driver"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("driver")) + expectedAPIRequestCount := 4 + if aggregatedDiscovery { + expectedAPIRequestCount = 2 + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + // All subsequent calls won't send requests to the server. + mapping, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: "crew.example.com", Kind: "driver"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("driver")) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + }) + + t.Run("LazyRESTMapper should work correctly with multiple API group versions", func(t *testing.T) { + g := gmg.NewWithT(t) + + httpClient, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + crt := newCountingRoundTripper(httpClient.Transport) + httpClient.Transport = crt + + lazyRestMapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + g.Expect(crt.GetRequestCount()).To(gmg.Equal(0)) + + // We explicitly ask for 2 versions: v1 and v2. 
+ // For each version it performs one request to the API server: + // #1: GET https://host/apis/crew.example.com/v1 + // #2: GET https://host/apis/crew.example.com/v2 + mapping, err := lazyRestMapper.RESTMapping(schema.GroupKind{Group: "crew.example.com", Kind: "driver"}, "v1", "v2") + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("driver")) + expectedAPIRequestCount := 4 + if aggregatedDiscovery { + expectedAPIRequestCount = 2 + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + // All subsequent calls won't send requests to the server as everything is stored in the cache. + mapping, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: "crew.example.com", Kind: "driver"}, "v1") + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("driver")) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + mapping, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: "crew.example.com", Kind: "driver"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("driver")) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + }) + + t.Run("LazyRESTMapper should work correctly with different API group versions", func(t *testing.T) { + g := gmg.NewWithT(t) + + httpClient, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + crt := newCountingRoundTripper(httpClient.Transport) + httpClient.Transport = crt + + lazyRestMapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + g.Expect(crt.GetRequestCount()).To(gmg.Equal(0)) + + // Now we want resources for crew.example.com/v1 version only. 
+ // Here we expect 1 call: + // #1: GET https://host/apis/crew.example.com/v1 + mapping, err := lazyRestMapper.RESTMapping(schema.GroupKind{Group: "crew.example.com", Kind: "driver"}, "v1") + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("driver")) + expectedAPIRequestCount := 3 + if aggregatedDiscovery { + expectedAPIRequestCount = 2 + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + // Get additional resources from v2. + // It sends another request: + // #2: GET https://host/apis/crew.example.com/v2 + mapping, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: "crew.example.com", Kind: "driver"}, "v2") + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("driver")) + if !aggregatedDiscovery { + expectedAPIRequestCount++ + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + // No subsequent calls require additional API requests. + mapping, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: "crew.example.com", Kind: "driver"}, "v1") + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("driver")) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + mapping, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: "crew.example.com", Kind: "driver"}, "v1", "v2") + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("driver")) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + }) + + t.Run("LazyRESTMapper should return an error if the group doesn't exist", func(t *testing.T) { + g := gmg.NewWithT(t) + + // After initialization for each invalid group the mapper performs just 1 request to the API server. 
+ + httpClient, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + crt := newCountingRoundTripper(httpClient.Transport) + httpClient.Transport = crt + + lazyRestMapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + // A version is specified but the group doesn't exist. + // For each group, we expect 1 call to the version-specific discovery endpoint: + // #1: GET https://host/apis// + + _, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: "INVALID1"}, "v1") + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + expectedAPIRequestCount := 3 + if aggregatedDiscovery { + expectedAPIRequestCount = 2 + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + crt.Reset() + + _, err = lazyRestMapper.RESTMappings(schema.GroupKind{Group: "INVALID2"}, "v1") + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(1)) + + _, err = lazyRestMapper.KindFor(schema.GroupVersionResource{Group: "INVALID3", Version: "v1", Resource: "invalid"}) + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(2)) + + _, err = lazyRestMapper.KindsFor(schema.GroupVersionResource{Group: "INVALID4", Version: "v1", Resource: "invalid"}) + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(3)) + + _, err = lazyRestMapper.ResourceFor(schema.GroupVersionResource{Group: "INVALID5", Version: "v1", Resource: "invalid"}) + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(4)) + + _, err = lazyRestMapper.ResourcesFor(schema.GroupVersionResource{Group: "INVALID6", Version: "v1", Resource: "invalid"}) + 
g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(5)) + + // No version is specified but the group doesn't exist. + // For each group, we expect 2 calls to discover all group versions: + // #1: GET https://host/api + // #2: GET https://host/apis + + _, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: "INVALID7"}) + g.Expect(err).To(beNoMatchError()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(7)) + + _, err = lazyRestMapper.RESTMappings(schema.GroupKind{Group: "INVALID8"}) + g.Expect(err).To(beNoMatchError()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(9)) + + _, err = lazyRestMapper.KindFor(schema.GroupVersionResource{Group: "INVALID9", Resource: "invalid"}) + g.Expect(err).To(beNoMatchError()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(11)) + + _, err = lazyRestMapper.KindsFor(schema.GroupVersionResource{Group: "INVALID10", Resource: "invalid"}) + g.Expect(err).To(beNoMatchError()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(13)) + + _, err = lazyRestMapper.ResourceFor(schema.GroupVersionResource{Group: "INVALID11", Resource: "invalid"}) + g.Expect(err).To(beNoMatchError()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(15)) + + _, err = lazyRestMapper.ResourcesFor(schema.GroupVersionResource{Group: "INVALID12", Resource: "invalid"}) + g.Expect(err).To(beNoMatchError()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(17)) + }) + + t.Run("LazyRESTMapper should return an error if a resource doesn't exist", func(t *testing.T) { + g := gmg.NewWithT(t) + + // For each invalid resource the mapper performs just 1 request to the API server. 
+ + httpClient, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + crt := newCountingRoundTripper(httpClient.Transport) + httpClient.Transport = crt + + lazyRestMapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + _, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "INVALID"}, "v1") + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + expectedAPIRequestCount := 3 + if aggregatedDiscovery { + expectedAPIRequestCount = 2 + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + crt.Reset() + + _, err = lazyRestMapper.RESTMappings(schema.GroupKind{Group: "", Kind: "INVALID"}, "v1") + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(1)) + + _, err = lazyRestMapper.KindFor(schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "INVALID"}) + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(2)) + + _, err = lazyRestMapper.KindsFor(schema.GroupVersionResource{Group: "authentication.k8s.io", Version: "v1", Resource: "INVALID"}) + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(3)) + + _, err = lazyRestMapper.ResourceFor(schema.GroupVersionResource{Group: "scheduling.k8s.io", Version: "v1", Resource: "INVALID"}) + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(4)) + + _, err = lazyRestMapper.ResourcesFor(schema.GroupVersionResource{Group: "policy", Version: "v1", Resource: "INVALID"}) + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(5)) + 
}) + + t.Run("LazyRESTMapper should return an error if the version doesn't exist", func(t *testing.T) { + g := gmg.NewWithT(t) + + // After initialization, for each invalid resource mapper performs 1 requests to the API server. + + httpClient, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + crt := newCountingRoundTripper(httpClient.Transport) + httpClient.Transport = crt + + lazyRestMapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + _, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "deployment"}, "INVALID") + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + expectedAPIRequestCount := 3 + if aggregatedDiscovery { + expectedAPIRequestCount = 2 + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + crt.Reset() + + _, err = lazyRestMapper.RESTMappings(schema.GroupKind{Group: "", Kind: "pod"}, "INVALID") + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(1)) + + _, err = lazyRestMapper.KindFor(schema.GroupVersionResource{Group: "networking.k8s.io", Version: "INVALID", Resource: "ingresses"}) + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(2)) + + _, err = lazyRestMapper.KindsFor(schema.GroupVersionResource{Group: "authentication.k8s.io", Version: "INVALID", Resource: "tokenreviews"}) + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(3)) + + _, err = lazyRestMapper.ResourceFor(schema.GroupVersionResource{Group: "scheduling.k8s.io", Version: "INVALID", Resource: "priorityclasses"}) + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(4)) + 
+ _, err = lazyRestMapper.ResourcesFor(schema.GroupVersionResource{Group: "policy", Version: "INVALID", Resource: "poddisruptionbudgets"}) + g.Expect(err).To(gmg.HaveOccurred()) + g.Expect(meta.IsNoMatchError(err)).To(gmg.BeTrue()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(5)) + }) + + t.Run("LazyRESTMapper should work correctly if the version isn't specified", func(t *testing.T) { + g := gmg.NewWithT(t) + + httpClient, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + lazyRestMapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + kind, err := lazyRestMapper.KindFor(schema.GroupVersionResource{Group: "networking.k8s.io", Resource: "ingress"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(kind.Version).ToNot(gmg.BeEmpty()) + + kinds, err := lazyRestMapper.KindsFor(schema.GroupVersionResource{Group: "authentication.k8s.io", Resource: "tokenreviews"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(kinds).ToNot(gmg.BeEmpty()) + g.Expect(kinds[0].Version).ToNot(gmg.BeEmpty()) + + resorce, err := lazyRestMapper.ResourceFor(schema.GroupVersionResource{Group: "scheduling.k8s.io", Resource: "priorityclasses"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(resorce.Version).ToNot(gmg.BeEmpty()) + + resorces, err := lazyRestMapper.ResourcesFor(schema.GroupVersionResource{Group: "policy", Resource: "poddisruptionbudgets"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(kinds).ToNot(gmg.BeEmpty()) + g.Expect(resorces[0].Version).ToNot(gmg.BeEmpty()) + }) + + t.Run("LazyRESTMapper can fetch CRDs if they were created at runtime", func(t *testing.T) { + g := gmg.NewWithT(t) + + // To fetch all versions mapper does 2 requests: + // GET https://host/api + // GET https://host/apis + // Then, for each version it performs just one request to the API server as usual: + // GET https://host/apis// + + httpClient, err := rest.HTTPClientFor(restCfg) + 
g.Expect(err).NotTo(gmg.HaveOccurred()) + + crt := newCountingRoundTripper(httpClient.Transport) + httpClient.Transport = crt + + lazyRestMapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + // There are no requests before any call + g.Expect(crt.GetRequestCount()).To(gmg.Equal(0)) + + // Since we don't specify what version we expect, restmapper will fetch them all and search there. + // To fetch a list of available versions + // #1: GET https://host/api + // #2: GET https://host/apis + // Then, for each currently registered version: + // #3: GET https://host/apis/crew.example.com/v1 + // #4: GET https://host/apis/crew.example.com/v2 + mapping, err := lazyRestMapper.RESTMapping(schema.GroupKind{Group: "crew.example.com", Kind: "driver"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("driver")) + expectedAPIRequestCount := 4 + if aggregatedDiscovery { + expectedAPIRequestCount = 2 + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + + s := scheme.Scheme + err = apiextensionsv1.AddToScheme(s) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + c, err := client.New(restCfg, client.Options{Scheme: s}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + // Register another CRD in runtime - "riders.crew.example.com". + createNewCRD(t.Context(), g, c, "crew.example.com", "Rider", "riders") + + // Wait a bit until the CRD is registered. + g.Eventually(func() error { + _, err := lazyRestMapper.RESTMapping(schema.GroupKind{Group: "crew.example.com", Kind: "rider"}) + return err + }).Should(gmg.Succeed()) + + // Since we don't specify what version we expect, restmapper will fetch them all and search there. 
+ // To fetch a list of available versions + // #1: GET https://host/api + // #2: GET https://host/apis + // Then, for each currently registered version: + // #3: GET https://host/apis/crew.example.com/v1 + // #4: GET https://host/apis/crew.example.com/v2 + mapping, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: "crew.example.com", Kind: "rider"}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal("rider")) + }) + + t.Run("LazyRESTMapper should invalidate the group cache if a version is not found", func(t *testing.T) { + g := gmg.NewWithT(t) + + httpClient, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + crt := newCountingRoundTripper(httpClient.Transport) + httpClient.Transport = crt + + lazyRestMapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + s := scheme.Scheme + err = apiextensionsv1.AddToScheme(s) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + c, err := client.New(restCfg, client.Options{Scheme: s}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + // Register a new CRD ina new group to avoid collisions when deleting versions - "taxis.inventory.example.com". + group := "inventory.example.com" + kind := "Taxi" + plural := "taxis" + crdName := plural + "." + group + // Create a CRD with two versions: v1alpha1 and v1 where both are served and + // v1 is the storage version so we can easily remove v1alpha1 later. 
+ crd := newCRD(t.Context(), g, c, group, kind, plural) + v1alpha1 := crd.Spec.Versions[0] + v1alpha1.Name = "v1alpha1" + v1alpha1.Storage = false + v1alpha1.Served = true + v1 := crd.Spec.Versions[0] + v1.Name = "v1" + v1.Storage = true + v1.Served = true + crd.Spec.Versions = []apiextensionsv1.CustomResourceDefinitionVersion{v1alpha1, v1} + g.Expect(c.Create(t.Context(), crd)).To(gmg.Succeed()) + t.Cleanup(func() { + g.Expect(c.Delete(context.Background(), crd)).To(gmg.Succeed()) //nolint:forbidigo //t.Context is cancelled in t.Cleanup + }) + + // Wait until the CRD is registered. + discHTTP, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + discClient, err := discovery.NewDiscoveryClientForConfigAndClient(restCfg, discHTTP) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Eventually(func(g gmg.Gomega) { + _, err = discClient.ServerResourcesForGroupVersion(group + "/v1") + g.Expect(err).NotTo(gmg.HaveOccurred()) + }).Should(gmg.Succeed(), "v1 should be available") + + // There are no requests before any call + g.Expect(crt.GetRequestCount()).To(gmg.Equal(0)) + + // Since we don't specify what version we expect, restmapper will fetch them all and search there. + // To fetch a list of available versions + // #1: GET https://host/api + // #2: GET https://host/apis + // Then, for all available versions: + // #3: GET https://host/apis/inventory.example.com/v1alpha1 + // #4: GET https://host/apis/inventory.example.com/v1 + // This should fill the cache for apiGroups and versions. + mapping, err := lazyRestMapper.RESTMapping(schema.GroupKind{Group: group, Kind: kind}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.GroupVersionKind.Kind).To(gmg.Equal(kind)) + expectedAPIRequestCount := 4 + if aggregatedDiscovery { + expectedAPIRequestCount = 2 + } + g.Expect(crt.GetRequestCount()).To(gmg.Equal(expectedAPIRequestCount)) + crt.Reset() // We reset the counter to check how many additional requests are made later. 
+ + // At this point v1alpha1 should be cached + _, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: group, Kind: kind}, "v1alpha1") + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(0)) + + // We update the CRD to only have v1 version. + g.Expect(c.Get(t.Context(), types.NamespacedName{Name: crdName}, crd)).To(gmg.Succeed()) + for _, version := range crd.Spec.Versions { + if version.Name == "v1" { + v1 = version + break + } + } + crd.Spec.Versions = []apiextensionsv1.CustomResourceDefinitionVersion{v1} + g.Expect(c.Update(t.Context(), crd)).To(gmg.Succeed()) + + // We wait until v1alpha1 is not available anymore. + g.Eventually(func(g gmg.Gomega) { + _, err = discClient.ServerResourcesForGroupVersion(group + "/v1alpha1") + g.Expect(apierrors.IsNotFound(err)).To(gmg.BeTrue(), "v1alpha1 should not be available anymore") + }).Should(gmg.Succeed()) + + // Although v1alpha1 is not available anymore, the cache is not invalidated yet so it should return a mapping. + _, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: group, Kind: kind}, "v1alpha1") + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(0)) + + // We request Limo, which is not in the mapper because it doesn't exist. + // This will trigger a reload of the lazy mapper cache. + // Reloading the cache will read v2 again and since it's not available anymore, it should invalidate the cache. + // #1: GET https://host/apis/inventory.example.com/v1alpha1 + // #2: GET https://host/apis/inventory.example.com/v1 + _, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: group, Kind: "Limo"}) + g.Expect(err).To(beNoMatchError()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(2)) + crt.Reset() + + // Now we request v1alpha1 again and it should return an error since the cache was invalidated. 
+ // #1: GET https://host/apis/inventory.example.com/v1alpha1 + _, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: group, Kind: kind}, "v1alpha1") + g.Expect(err).To(beNoMatchError()) + g.Expect(crt.GetRequestCount()).To(gmg.Equal(1)) + + // Verify that when requesting the mapping without a version, it doesn't error + // and it returns v1. + mapping, err = lazyRestMapper.RESTMapping(schema.GroupKind{Group: group, Kind: kind}) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(mapping.Resource.Version).To(gmg.Equal("v1")) + }) + + t.Run("Restmapper should consistently return the preferred version", func(t *testing.T) { + g := gmg.NewWithT(t) + + wg := sync.WaitGroup{} + wg.Add(50) + for i := 0; i < 50; i++ { + go func() { + defer wg.Done() + httpClient, err := rest.HTTPClientFor(restCfg) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + mapper, err := apiutil.NewDynamicRESTMapper(restCfg, httpClient) + g.Expect(err).NotTo(gmg.HaveOccurred()) + + mapping, err := mapper.RESTMapping(schema.GroupKind{ + Group: "crew.example.com", + Kind: "Driver", + }) + g.Expect(err).NotTo(gmg.HaveOccurred()) + // APIServer seems to have a heuristic to prefer the higher + // version number. + g.Expect(mapping.GroupVersionKind.Version).To(gmg.Equal("v2")) + }() + } + wg.Wait() + }) + }) + } +} + +// createNewCRD creates a new CRD with the given group, kind, and plural and returns it. +func createNewCRD(ctx context.Context, g gmg.Gomega, c client.Client, group, kind, plural string) *apiextensionsv1.CustomResourceDefinition { + newCRD := newCRD(ctx, g, c, group, kind, plural) + g.Expect(c.Create(ctx, newCRD)).To(gmg.Succeed()) + + return newCRD +} + +// newCRD returns a new CRD with the given group, kind, and plural. 
+func newCRD(ctx context.Context, g gmg.Gomega, c client.Client, group, kind, plural string) *apiextensionsv1.CustomResourceDefinition { + crd := &apiextensionsv1.CustomResourceDefinition{} + err := c.Get(ctx, types.NamespacedName{Name: "drivers.crew.example.com"}, crd) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(crd.Spec.Names.Kind).To(gmg.Equal("Driver")) + + newCRD := &apiextensionsv1.CustomResourceDefinition{} + crd.DeepCopyInto(newCRD) + newCRD.Spec.Group = group + newCRD.Name = plural + "." + group + newCRD.Spec.Names = apiextensionsv1.CustomResourceDefinitionNames{ + Kind: kind, + Plural: plural, + } + newCRD.ResourceVersion = "" + + return newCRD +} + +func beNoMatchError() gomegatypes.GomegaMatcher { + return &errorMatcher{ + checkFunc: meta.IsNoMatchError, + message: "NoMatch", + } +} + +type errorMatcher struct { + checkFunc func(error) bool + message string +} + +func (e *errorMatcher) Match(actual interface{}) (success bool, err error) { + if actual == nil { + return false, nil + } + + actualErr, actualOk := actual.(error) + if !actualOk { + return false, fmt.Errorf("expected an error-type. got:\n%s", format.Object(actual, 1)) + } + + return e.checkFunc(actualErr), nil +} + +func (e *errorMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("to be %s error", e.message)) +} + +func (e *errorMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not to be %s error", e.message)) +} diff --git a/pkg/client/apiutil/restmapper_wb_test.go b/pkg/client/apiutil/restmapper_wb_test.go new file mode 100644 index 0000000000..73c4236724 --- /dev/null +++ b/pkg/client/apiutil/restmapper_wb_test.go @@ -0,0 +1,214 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "testing" + + gmg "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/restmapper" +) + +func TestLazyRestMapper_fetchGroupVersionResourcesLocked_CacheInvalidation(t *testing.T) { + tests := []struct { + name string + groupName string + versions []string + cachedAPIGroups, expectedAPIGroups map[string]*metav1.APIGroup + cachedKnownGroups, expectedKnownGroups map[string]*restmapper.APIGroupResources + }{ + { + name: "Not found version for cached groupVersion in apiGroups and knownGroups", + groupName: "group1", + versions: []string{"v1", "v2"}, + cachedAPIGroups: map[string]*metav1.APIGroup{ + "group1": { + Name: "group1", + Versions: []metav1.GroupVersionForDiscovery{ + { + Version: "v1", + }, + }, + }, + }, + cachedKnownGroups: map[string]*restmapper.APIGroupResources{ + "group1": { + VersionedResources: map[string][]metav1.APIResource{ + "v1": { + { + Name: "resource1", + }, + }, + }, + }, + }, + expectedAPIGroups: map[string]*metav1.APIGroup{}, + expectedKnownGroups: map[string]*restmapper.APIGroupResources{}, + }, + { + name: "Not found version for cached groupVersion only in apiGroups", + groupName: "group1", + versions: []string{"v1", "v2"}, + cachedAPIGroups: map[string]*metav1.APIGroup{ + "group1": { + Name: "group1", + Versions: []metav1.GroupVersionForDiscovery{ + { + Version: "v1", + }, + }, + }, + }, + cachedKnownGroups: map[string]*restmapper.APIGroupResources{ 
+ "group1": { + VersionedResources: map[string][]metav1.APIResource{ + "v3": { + { + Name: "resource1", + }, + }, + }, + }, + }, + expectedAPIGroups: map[string]*metav1.APIGroup{}, + expectedKnownGroups: map[string]*restmapper.APIGroupResources{ + "group1": { + VersionedResources: map[string][]metav1.APIResource{ + "v3": { + { + Name: "resource1", + }, + }, + }, + }, + }, + }, + { + name: "Not found version for cached groupVersion only in knownGroups", + groupName: "group1", + versions: []string{"v1", "v2"}, + cachedAPIGroups: map[string]*metav1.APIGroup{ + "group1": { + Name: "group1", + Versions: []metav1.GroupVersionForDiscovery{ + { + Version: "v3", + }, + }, + }, + }, + cachedKnownGroups: map[string]*restmapper.APIGroupResources{ + "group1": { + VersionedResources: map[string][]metav1.APIResource{ + "v2": { + { + Name: "resource1", + }, + }, + }, + }, + }, + expectedAPIGroups: map[string]*metav1.APIGroup{ + "group1": { + Name: "group1", + Versions: []metav1.GroupVersionForDiscovery{ + { + Version: "v3", + }, + }, + }, + }, + expectedKnownGroups: map[string]*restmapper.APIGroupResources{}, + }, + { + name: "Not found version for non cached groupVersion", + groupName: "group1", + versions: []string{"v1", "v2"}, + cachedAPIGroups: map[string]*metav1.APIGroup{ + "group1": { + Name: "group1", + Versions: []metav1.GroupVersionForDiscovery{ + { + Version: "v3", + }, + }, + }, + }, + cachedKnownGroups: map[string]*restmapper.APIGroupResources{ + "group1": { + VersionedResources: map[string][]metav1.APIResource{ + "v3": { + { + Name: "resource1", + }, + }, + }, + }, + }, + expectedAPIGroups: map[string]*metav1.APIGroup{ + "group1": { + Name: "group1", + Versions: []metav1.GroupVersionForDiscovery{ + { + Version: "v3", + }, + }, + }, + }, + expectedKnownGroups: map[string]*restmapper.APIGroupResources{ + "group1": { + VersionedResources: map[string][]metav1.APIResource{ + "v3": { + { + Name: "resource1", + }, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + g := gmg.NewWithT(t) + m := &mapper{ + mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), + client: &fakeAggregatedDiscoveryClient{DiscoveryInterface: fake.NewSimpleClientset().Discovery()}, + apiGroups: tt.cachedAPIGroups, + knownGroups: tt.cachedKnownGroups, + } + _, err := m.fetchGroupVersionResourcesLocked(tt.groupName, tt.versions...) + g.Expect(err).NotTo(gmg.HaveOccurred()) + g.Expect(m.apiGroups).To(gmg.BeComparableTo(tt.expectedAPIGroups)) + g.Expect(m.knownGroups).To(gmg.BeComparableTo(tt.expectedKnownGroups)) + }) + } +} + +type fakeAggregatedDiscoveryClient struct { + discovery.DiscoveryInterface +} + +func (f *fakeAggregatedDiscoveryClient) GroupsAndMaybeResources() (*metav1.APIGroupList, map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error, error) { + groupList, err := f.DiscoveryInterface.ServerGroups() + return groupList, nil, nil, err +} diff --git a/pkg/client/apiutil/testdata/crd.yaml b/pkg/client/apiutil/testdata/crd.yaml new file mode 100644 index 0000000000..5bb2d73f69 --- /dev/null +++ b/pkg/client/apiutil/testdata/crd.yaml @@ -0,0 +1,62 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: drivers.crew.example.com +spec: + group: crew.example.com + names: + kind: Driver + plural: drivers + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object + - name: v2 + served: true + storage: false + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/pkg/client/applyconfigurations.go b/pkg/client/applyconfigurations.go new file mode 100644 index 0000000000..97192050f9 --- /dev/null +++ b/pkg/client/applyconfigurations.go @@ -0,0 +1,75 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" +) + +type unstructuredApplyConfiguration struct { + *unstructured.Unstructured +} + +func (u *unstructuredApplyConfiguration) IsApplyConfiguration() {} + +// ApplyConfigurationFromUnstructured creates a runtime.ApplyConfiguration from an *unstructured.Unstructured object. +// +// Do not use Unstructured objects here that were generated from API objects, as its impossible to tell +// if a zero value was explicitly set. +func ApplyConfigurationFromUnstructured(u *unstructured.Unstructured) runtime.ApplyConfiguration { + return &unstructuredApplyConfiguration{Unstructured: u} +} + +type applyconfigurationRuntimeObject struct { + runtime.ApplyConfiguration +} + +func (a *applyconfigurationRuntimeObject) GetObjectKind() schema.ObjectKind { + return a +} + +func (a *applyconfigurationRuntimeObject) GroupVersionKind() schema.GroupVersionKind { + return schema.GroupVersionKind{} +} + +func (a *applyconfigurationRuntimeObject) SetGroupVersionKind(gvk schema.GroupVersionKind) {} + +func (a *applyconfigurationRuntimeObject) DeepCopyObject() runtime.Object { + panic("applyconfigurationRuntimeObject does not support DeepCopyObject") +} + +func runtimeObjectFromApplyConfiguration(ac runtime.ApplyConfiguration) runtime.Object { + return &applyconfigurationRuntimeObject{ApplyConfiguration: ac} +} + +func gvkFromApplyConfiguration(ac applyConfiguration) (schema.GroupVersionKind, error) { + var gvk schema.GroupVersionKind + gv, err := schema.ParseGroupVersion(ptr.Deref(ac.GetAPIVersion(), "")) + if err != nil { + return gvk, fmt.Errorf("failed to parse %q as GroupVersion: %w", ptr.Deref(ac.GetAPIVersion(), ""), err) + } + gvk.Group = gv.Group + gvk.Version = gv.Version + gvk.Kind = ptr.Deref(ac.GetKind(), "") + + return gvk, nil +} diff --git a/pkg/client/client.go b/pkg/client/client.go 
index 43facbfba6..7e38142273 100644 --- a/pkg/client/client.go +++ b/pkg/client/client.go @@ -18,43 +18,118 @@ package client import ( "context" + "errors" "fmt" - "reflect" + "net/http" + "strings" "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/metadata" "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/log" ) -// Options are creation options for a Client +// Options are creation options for a Client. type Options struct { + // HTTPClient is the HTTP client to use for requests. + HTTPClient *http.Client + // Scheme, if provided, will be used to map go structs to GroupVersionKinds Scheme *runtime.Scheme // Mapper, if provided, will be used to map GroupVersionKinds to Resources Mapper meta.RESTMapper + + // Cache, if provided, is used to read objects from the cache. + Cache *CacheOptions + + // DryRun instructs the client to only perform dry run requests. + DryRun *bool } +// CacheOptions are options for creating a cache-backed client. +type CacheOptions struct { + // Reader is a cache-backed reader that will be used to read objects from the cache. + // +required + Reader Reader + // DisableFor is a list of objects that should never be read from the cache. + // Objects configured here always result in a live lookup. + DisableFor []Object + // Unstructured is a flag that indicates whether the cache-backed client should + // read unstructured objects or lists from the cache. + // If false, unstructured objects will always result in a live lookup. + Unstructured bool +} + +// NewClientFunc allows a user to define how to create a client. 
+type NewClientFunc func(config *rest.Config, options Options) (Client, error) + // New returns a new Client using the provided config and Options. -// The returned client reads *and* writes directly from the server -// (it doesn't use object caches). It understands how to work with -// normal types (both custom resources and aggregated/built-in resources), -// as well as unstructured types. // +// By default, the client surfaces warnings returned by the server. To +// suppress warnings, set config.WarningHandlerWithContext = rest.NoWarnings{}. To +// define custom behavior, implement the rest.WarningHandlerWithContext interface. +// See [sigs.k8s.io/controller-runtime/pkg/log.KubeAPIWarningLogger] for +// an example. +// +// The client's read behavior is determined by Options.Cache. +// If either Options.Cache or Options.Cache.Reader is nil, +// the client reads directly from the API server. +// If both Options.Cache and Options.Cache.Reader are non-nil, +// the client reads from a local cache. However, specific +// resources can still be configured to bypass the cache based +// on Options.Cache.Unstructured and Options.Cache.DisableFor. +// Write operations are always performed directly on the API server. +// +// The client understands how to work with normal types (both custom resources +// and aggregated/built-in resources), as well as unstructured types. // In the case of normal types, the scheme will be used to look up the // corresponding group, version, and kind for the given type. In the -// case of unstrctured types, the group, version, and kind will be extracted +// case of unstructured types, the group, version, and kind will be extracted // from the corresponding fields on the object. 
-func New(config *rest.Config, options Options) (Client, error) { +func New(config *rest.Config, options Options) (c Client, err error) { + c, err = newClient(config, options) + if err == nil && options.DryRun != nil && *options.DryRun { + c = NewDryRunClient(c) + } + return c, err +} + +func newClient(config *rest.Config, options Options) (*client, error) { if config == nil { return nil, fmt.Errorf("must provide non-nil rest.Config to client.New") } + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + if config.WarningHandler == nil && config.WarningHandlerWithContext == nil { + // By default, we surface warnings. + config.WarningHandlerWithContext = log.NewKubeAPIWarningLogger( + log.KubeAPIWarningLoggerOptions{ + Deduplicate: false, + }, + ) + } + + // Use the rest HTTP client for the provided config if unset + if options.HTTPClient == nil { + var err error + options.HTTPClient, err = rest.HTTPClientFor(config) + if err != nil { + return nil, err + } + } + // Init a scheme if none provided if options.Scheme == nil { options.Scheme = scheme.Scheme @@ -63,127 +138,494 @@ func New(config *rest.Config, options Options) (Client, error) { // Init a Mapper if none provided if options.Mapper == nil { var err error - options.Mapper, err = apiutil.NewDiscoveryRESTMapper(config) + options.Mapper, err = apiutil.NewDynamicRESTMapper(config, options.HTTPClient) if err != nil { return nil, err } } - dynamicClient, err := dynamic.NewForConfig(config) + resources := &clientRestResources{ + httpClient: options.HTTPClient, + config: config, + scheme: options.Scheme, + mapper: options.Mapper, + codecs: serializer.NewCodecFactory(options.Scheme), + + structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + } + + rawMetaClient, err := metadata.NewForConfigAndClient(metadata.ConfigFor(config), options.HTTPClient) 
if err != nil { - return nil, err + return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) } c := &client{ typedClient: typedClient{ - cache: clientCache{ - config: config, - scheme: options.Scheme, - mapper: options.Mapper, - codecs: serializer.NewCodecFactory(options.Scheme), - resourceByType: make(map[reflect.Type]*resourceMeta), - }, + resources: resources, paramCodec: runtime.NewParameterCodec(options.Scheme), }, unstructuredClient: unstructuredClient{ - client: dynamicClient, + resources: resources, + paramCodec: noConversionParamCodec{}, + }, + metadataClient: metadataClient{ + client: rawMetaClient, restMapper: options.Mapper, }, + scheme: options.Scheme, + mapper: options.Mapper, + } + if options.Cache == nil || options.Cache.Reader == nil { + return c, nil } + // We want a cache if we're here. + // Set the cache. + c.cache = options.Cache.Reader + + // Load uncached GVKs. + c.cacheUnstructured = options.Cache.Unstructured + c.uncachedGVKs = map[schema.GroupVersionKind]struct{}{} + for _, obj := range options.Cache.DisableFor { + gvk, err := c.GroupVersionKindFor(obj) + if err != nil { + return nil, err + } + c.uncachedGVKs[gvk] = struct{}{} + } return c, nil } var _ Client = &client{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. +// client is a client.Client configured to either read from a local cache or directly from the API server. +// Write operations are always performed directly on the API server. +// It lazily initializes new clients at the time they are used. 
type client struct { typedClient typedClient unstructuredClient unstructuredClient + metadataClient metadataClient + scheme *runtime.Scheme + mapper meta.RESTMapper + + cache Reader + uncachedGVKs map[schema.GroupVersionKind]struct{} + cacheUnstructured bool } -// Create implements client.Client -func (c *client) Create(ctx context.Context, obj runtime.Object, opts ...CreateOptionFunc) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { +func (c *client) shouldBypassCache(obj runtime.Object) (bool, error) { + if c.cache == nil { + return true, nil + } + + gvk, err := c.GroupVersionKindFor(obj) + if err != nil { + return false, err + } + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. + if meta.IsListType(obj) { + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + } + if _, isUncached := c.uncachedGVKs[gvk]; isUncached { + return true, nil + } + if !c.cacheUnstructured { + _, isUnstructured := obj.(runtime.Unstructured) + return isUnstructured, nil + } + return false, nil +} + +// resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. +func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) { + if gvk != schema.EmptyObjectKind.GroupVersionKind() { + if v, ok := obj.(schema.ObjectKind); ok { + v.SetGroupVersionKind(gvk) + } + } +} + +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *client) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return apiutil.GVKForObject(obj, c.scheme) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (c *client) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return apiutil.IsObjectNamespaced(obj, c.scheme, c.mapper) +} + +// Scheme returns the scheme this client is using. 
+func (c *client) Scheme() *runtime.Scheme { + return c.scheme +} + +// RESTMapper returns the scheme this client is using. +func (c *client) RESTMapper() meta.RESTMapper { + return c.mapper +} + +// Create implements client.Client. +func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + switch obj.(type) { + case runtime.Unstructured: return c.unstructuredClient.Create(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot create using only metadata") + default: + return c.typedClient.Create(ctx, obj, opts...) } - return c.typedClient.Create(ctx, obj, opts...) } -// Update implements client.Client -func (c *client) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { +// Update implements client.Client. +func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case runtime.Unstructured: return c.unstructuredClient.Update(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot update using only metadata -- did you mean to patch?") + default: + return c.typedClient.Update(ctx, obj, opts...) } - return c.typedClient.Update(ctx, obj, opts...) } -// Delete implements client.Client -func (c *client) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { +// Delete implements client.Client. +func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + switch obj.(type) { + case runtime.Unstructured: return c.unstructuredClient.Delete(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.Delete(ctx, obj, opts...) + default: + return c.typedClient.Delete(ctx, obj, opts...) + } +} + +// DeleteAllOf implements client.Client. 
+func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + switch obj.(type) { + case runtime.Unstructured: + return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.DeleteAllOf(ctx, obj, opts...) + default: + return c.typedClient.DeleteAllOf(ctx, obj, opts...) } - return c.typedClient.Delete(ctx, obj, opts...) } -// Patch implements client.Client -func (c *client) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { +// Patch implements client.Client. +func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case runtime.Unstructured: return c.unstructuredClient.Patch(ctx, obj, patch, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.Patch(ctx, obj, patch, opts...) + default: + return c.typedClient.Patch(ctx, obj, patch, opts...) } - return c.typedClient.Patch(ctx, obj, patch, opts...) } -// Get implements client.Client -func (c *client) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { - return c.unstructuredClient.Get(ctx, key, obj) +func (c *client) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error { + switch obj := obj.(type) { + case *unstructuredApplyConfiguration: + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + return c.unstructuredClient.Apply(ctx, obj, opts...) + default: + return c.typedClient.Apply(ctx, obj, opts...) 
} - return c.typedClient.Get(ctx, key, obj) } -// List implements client.Client -func (c *client) List(ctx context.Context, obj runtime.Object, opts ...ListOptionFunc) error { - _, ok := obj.(*unstructured.UnstructuredList) - if ok { +// Get implements client.Client. +func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + if isUncached, err := c.shouldBypassCache(obj); err != nil { + return err + } else if !isUncached { + // Attempt to get from the cache. + return c.cache.Get(ctx, key, obj, opts...) + } + + // Perform a live lookup. + switch obj.(type) { + case runtime.Unstructured: + return c.unstructuredClient.Get(ctx, key, obj, opts...) + case *metav1.PartialObjectMetadata: + // Metadata only object should always preserve the GVK coming in from the caller. + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + return c.metadataClient.Get(ctx, key, obj, opts...) + default: + return c.typedClient.Get(ctx, key, obj, opts...) + } +} + +// List implements client.Client. +func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + if isUncached, err := c.shouldBypassCache(obj); err != nil { + return err + } else if !isUncached { + // Attempt to get from the cache. + return c.cache.List(ctx, obj, opts...) + } + + // Perform a live lookup. + switch x := obj.(type) { + case runtime.Unstructured: return c.unstructuredClient.List(ctx, obj, opts...) + case *metav1.PartialObjectMetadataList: + // Metadata only object should always preserve the GVK. + gvk := obj.GetObjectKind().GroupVersionKind() + defer c.resetGroupVersionKind(obj, gvk) + + // Call the list client. + if err := c.metadataClient.List(ctx, obj, opts...); err != nil { + return err + } + + // Restore the GVK for each item in the list. 
+			itemGVK := schema.GroupVersionKind{
+				Group:   gvk.Group,
+				Version: gvk.Version,
+				// TODO: this is producing unsafe guesses that don't actually work,
+				// but it matches ~99% of the cases out there.
+				Kind: strings.TrimSuffix(gvk.Kind, "List"),
+			}
+			for i := range x.Items {
+				item := &x.Items[i]
+				item.SetGroupVersionKind(itemGVK)
+			}
+
+			return nil
+		default:
+			return c.typedClient.List(ctx, obj, opts...)
+		}
+}
+
+// Status implements client.StatusClient.
+func (c *client) Status() SubResourceWriter {
+	return c.SubResource("status")
+}
+
+func (c *client) SubResource(subResource string) SubResourceClient {
+	return &subResourceClient{client: c, subResource: subResource}
+}
+
+// subResourceClient is client.SubResourceWriter that writes to subresources.
+type subResourceClient struct {
+	client      *client
+	subResource string
+}
+
+// ensure subResourceClient implements client.SubResourceClient.
+var _ SubResourceClient = &subResourceClient{}
+
+// SubResourceGetOptions holds all the possible configuration
+// for a subresource Get request.
+type SubResourceGetOptions struct {
+	Raw *metav1.GetOptions
+}
+
+// ApplyToSubResourceGet updates the configuration to the given get options.
+func (getOpt *SubResourceGetOptions) ApplyToSubResourceGet(o *SubResourceGetOptions) {
+	if getOpt.Raw != nil {
+		o.Raw = getOpt.Raw
+	}
+}
+
+// ApplyOptions applies the given options.
+func (getOpt *SubResourceGetOptions) ApplyOptions(opts []SubResourceGetOption) *SubResourceGetOptions {
+	for _, o := range opts {
+		o.ApplyToSubResourceGet(getOpt)
+	}
+
+	return getOpt
+}
+
+// AsGetOptions returns the configured options as *metav1.GetOptions.
+func (getOpt *SubResourceGetOptions) AsGetOptions() *metav1.GetOptions {
+	if getOpt.Raw == nil {
+		return &metav1.GetOptions{}
+	}
+	return getOpt.Raw
+}
+
+// SubResourceUpdateOptions holds all the possible configuration
+// for a subresource update request.
+type SubResourceUpdateOptions struct {
+	UpdateOptions
+	SubResourceBody Object
+}
+
+// ApplyToSubResourceUpdate updates the configuration on the given update options.
+func (uo *SubResourceUpdateOptions) ApplyToSubResourceUpdate(o *SubResourceUpdateOptions) {
+	uo.UpdateOptions.ApplyToUpdate(&o.UpdateOptions)
+	if uo.SubResourceBody != nil {
+		o.SubResourceBody = uo.SubResourceBody
+	}
+}
+
+// ApplyOptions applies the given options.
+func (uo *SubResourceUpdateOptions) ApplyOptions(opts []SubResourceUpdateOption) *SubResourceUpdateOptions {
+	for _, o := range opts {
+		o.ApplyToSubResourceUpdate(uo)
+	}
+
+	return uo
+}
+
+// SubResourceUpdateAndPatchOption is an option that can be used for either
+// a subresource update or patch request.
+type SubResourceUpdateAndPatchOption interface {
+	SubResourceUpdateOption
+	SubResourcePatchOption
+}
+
+// WithSubResourceBody returns an option that uses the given body
+// for a subresource Update or Patch operation.
+func WithSubResourceBody(body Object) SubResourceUpdateAndPatchOption {
+	return &withSubresourceBody{body: body}
+}
+
+type withSubresourceBody struct {
+	body Object
+}
+
+func (wsr *withSubresourceBody) ApplyToSubResourceUpdate(o *SubResourceUpdateOptions) {
+	o.SubResourceBody = wsr.body
+}
+
+func (wsr *withSubresourceBody) ApplyToSubResourcePatch(o *SubResourcePatchOptions) {
+	o.SubResourceBody = wsr.body
+}
+
+// SubResourceCreateOptions are all the possible configurations for a subresource
+// create request.
+type SubResourceCreateOptions struct {
+	CreateOptions
+}
+
+// ApplyOptions applies the given options.
+func (co *SubResourceCreateOptions) ApplyOptions(opts []SubResourceCreateOption) *SubResourceCreateOptions {
+	for _, o := range opts {
+		o.ApplyToSubResourceCreate(co)
 	}
-	return c.typedClient.List(ctx, obj, opts...)
+
+	return co
+}
+
+// ApplyToSubResourceCreate applies the configuration on the given create options.
+func (co *SubResourceCreateOptions) ApplyToSubResourceCreate(o *SubResourceCreateOptions) { + co.CreateOptions.ApplyToCreate(&co.CreateOptions) } -// Status implements client.StatusClient -func (c *client) Status() StatusWriter { - return &statusWriter{client: c} +// SubResourcePatchOptions holds all possible configurations for a subresource patch +// request. +type SubResourcePatchOptions struct { + PatchOptions + SubResourceBody Object } -// statusWriter is client.StatusWriter that writes status subresource -type statusWriter struct { - client *client +// ApplyOptions applies the given options. +func (po *SubResourcePatchOptions) ApplyOptions(opts []SubResourcePatchOption) *SubResourcePatchOptions { + for _, o := range opts { + o.ApplyToSubResourcePatch(po) + } + + return po } -// ensure statusWriter implements client.StatusWriter -var _ StatusWriter = &statusWriter{} +// ApplyToSubResourcePatch applies the configuration on the given patch options. +func (po *SubResourcePatchOptions) ApplyToSubResourcePatch(o *SubResourcePatchOptions) { + po.PatchOptions.ApplyToPatch(&o.PatchOptions) + if po.SubResourceBody != nil { + o.SubResourceBody = po.SubResourceBody + } +} + +// SubResourceApplyOptions are the options for a subresource +// apply request. +type SubResourceApplyOptions struct { + ApplyOptions + SubResourceBody runtime.ApplyConfiguration +} + +// ApplyOpts applies the given options. +func (ao *SubResourceApplyOptions) ApplyOpts(opts []SubResourceApplyOption) *SubResourceApplyOptions { + for _, o := range opts { + o.ApplyToSubResourceApply(ao) + } + + return ao +} + +// ApplyToSubResourceApply applies the configuration on the given patch options. 
+func (ao *SubResourceApplyOptions) ApplyToSubResourceApply(o *SubResourceApplyOptions) { + ao.ApplyOptions.ApplyToApply(&o.ApplyOptions) + if ao.SubResourceBody != nil { + o.SubResourceBody = ao.SubResourceBody + } +} + +func (sc *subResourceClient) Get(ctx context.Context, obj Object, subResource Object, opts ...SubResourceGetOption) error { + switch obj.(type) { + case runtime.Unstructured: + return sc.client.unstructuredClient.GetSubResource(ctx, obj, subResource, sc.subResource, opts...) + case *metav1.PartialObjectMetadata: + return errors.New("can not get subresource using only metadata") + default: + return sc.client.typedClient.GetSubResource(ctx, obj, subResource, sc.subResource, opts...) + } +} + +// Create implements client.SubResourceClient +func (sc *subResourceClient) Create(ctx context.Context, obj Object, subResource Object, opts ...SubResourceCreateOption) error { + defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + defer sc.client.resetGroupVersionKind(subResource, subResource.GetObjectKind().GroupVersionKind()) + + switch obj.(type) { + case runtime.Unstructured: + return sc.client.unstructuredClient.CreateSubResource(ctx, obj, subResource, sc.subResource, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") + default: + return sc.client.typedClient.CreateSubResource(ctx, obj, subResource, sc.subResource, opts...) + } +} + +// Update implements client.SubResourceClient +func (sc *subResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { + defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case runtime.Unstructured: + return sc.client.unstructuredClient.UpdateSubResource(ctx, obj, sc.subResource, opts...) 
+ case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") + default: + return sc.client.typedClient.UpdateSubResource(ctx, obj, sc.subResource, opts...) + } +} -// Update implements client.StatusWriter -func (sw *statusWriter) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { - return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...) +// Patch implements client.SubResourceWriter. +func (sc *subResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { + defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case runtime.Unstructured: + return sc.client.unstructuredClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...) + case *metav1.PartialObjectMetadata: + return sc.client.metadataClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...) + default: + return sc.client.typedClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...) } - return sw.client.typedClient.UpdateStatus(ctx, obj, opts...) } -// Patch implements client.Client -func (sw *statusWriter) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { - return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...) +func (sc *subResourceClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...SubResourceApplyOption) error { + switch obj := obj.(type) { + case *unstructuredApplyConfiguration: + defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + return sc.client.unstructuredClient.ApplySubResource(ctx, obj, sc.subResource, opts...) + default: + return sc.client.typedClient.ApplySubResource(ctx, obj, sc.subResource, opts...) 
} - return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...) } diff --git a/pkg/client/client_cache.go b/pkg/client/client_cache.go deleted file mode 100644 index 2a1ff05d50..0000000000 --- a/pkg/client/client_cache.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "reflect" - "strings" - "sync" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// clientCache creates and caches rest clients and metadata for Kubernetes types -type clientCache struct { - // config is the rest.Config to talk to an apiserver - config *rest.Config - - // scheme maps go structs to GroupVersionKinds - scheme *runtime.Scheme - - // mapper maps GroupVersionKinds to Resources - mapper meta.RESTMapper - - // codecs are used to create a REST client for a gvk - codecs serializer.CodecFactory - - // resourceByType caches type metadata - resourceByType map[reflect.Type]*resourceMeta - mu sync.RWMutex -} - -// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. -// If the object is a list, the resource represents the item's type instead. 
-func (c *clientCache) newResource(obj runtime.Object) (*resourceMeta, error) { - gvk, err := apiutil.GVKForObject(obj, c.scheme) - if err != nil { - return nil, err - } - - if strings.HasSuffix(gvk.Kind, "List") && meta.IsListType(obj) { - // if this was a list, treat it as a request for the item's resource - gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] - } - - client, err := apiutil.RESTClientForGVK(gvk, c.config, c.codecs) - if err != nil { - return nil, err - } - mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - return &resourceMeta{Interface: client, mapping: mapping, gvk: gvk}, nil -} - -// getResource returns the resource meta information for the given type of object. -// If the object is a list, the resource represents the item's type instead. -func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { - typ := reflect.TypeOf(obj) - - // It's better to do creation work twice than to not let multiple - // people make requests at once - c.mu.RLock() - r, known := c.resourceByType[typ] - c.mu.RUnlock() - - if known { - return r, nil - } - - // Initialize a new Client - c.mu.Lock() - defer c.mu.Unlock() - r, err := c.newResource(obj) - if err != nil { - return nil, err - } - c.resourceByType[typ] = r - return r, err -} - -// getObjMeta returns objMeta containing both type and object metadata and state -func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { - r, err := c.getResource(obj) - if err != nil { - return nil, err - } - m, err := meta.Accessor(obj) - if err != nil { - return nil, err - } - return &objMeta{resourceMeta: r, Object: m}, err -} - -// resourceMeta caches state for a Kubernetes type. 
-type resourceMeta struct { - // client is the rest client used to talk to the apiserver - rest.Interface - // gvk is the GroupVersionKind of the resourceMeta - gvk schema.GroupVersionKind - // mapping is the rest mapping - mapping *meta.RESTMapping -} - -// isNamespaced returns true if the type is namespaced -func (r *resourceMeta) isNamespaced() bool { - if r.mapping.Scope.Name() == meta.RESTScopeNameRoot { - return false - } - return true -} - -// resource returns the resource name of the type -func (r *resourceMeta) resource() string { - return r.mapping.Resource.Resource -} - -// objMeta stores type and object information about a Kubernetes type -type objMeta struct { - // resourceMeta contains type information for the object - *resourceMeta - - // Object contains meta data for the object instance - metav1.Object -} diff --git a/pkg/client/client_rest_resources.go b/pkg/client/client_rest_resources.go new file mode 100644 index 0000000000..acff7a46a4 --- /dev/null +++ b/pkg/client/client_rest_resources.go @@ -0,0 +1,200 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "fmt" + "net/http" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/rest" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// clientRestResources creates and stores rest clients and metadata for Kubernetes types. +type clientRestResources struct { + // httpClient is the http client to use for requests + httpClient *http.Client + + // config is the rest.Config to talk to an apiserver + config *rest.Config + + // scheme maps go structs to GroupVersionKinds + scheme *runtime.Scheme + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // codecs are used to create a REST client for a gvk + codecs serializer.CodecFactory + + // structuredResourceByType stores structured type metadata + structuredResourceByType map[schema.GroupVersionKind]*resourceMeta + // unstructuredResourceByType stores unstructured type metadata + unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta + mu sync.RWMutex +} + +// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. +// If the object is a list, the resource represents the item's type instead. 
+func (c *clientRestResources) newResource(gvk schema.GroupVersionKind, + isList bool, + forceDisableProtoBuf bool, + isUnstructured bool, +) (*resourceMeta, error) { + if strings.HasSuffix(gvk.Kind, "List") && isList { + // if this was a list, treat it as a request for the item's resource + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + client, err := apiutil.RESTClientForGVK(gvk, forceDisableProtoBuf, isUnstructured, c.config, c.codecs, c.httpClient) + if err != nil { + return nil, err + } + mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + return &resourceMeta{Interface: client, mapping: mapping, gvk: gvk}, nil +} + +type applyConfiguration interface { + GetName() *string + GetNamespace() *string + GetKind() *string + GetAPIVersion() *string +} + +// getResource returns the resource meta information for the given type of object. +// If the object is a list, the resource represents the item's type instead. +func (c *clientRestResources) getResource(obj any) (*resourceMeta, error) { + var gvk schema.GroupVersionKind + var err error + var isApplyConfiguration bool + switch o := obj.(type) { + case runtime.Object: + gvk, err = apiutil.GVKForObject(o, c.scheme) + if err != nil { + return nil, err + } + case runtime.ApplyConfiguration: + ac, ok := o.(applyConfiguration) + if !ok { + return nil, fmt.Errorf("%T is a runtime.ApplyConfiguration but not an applyConfiguration", o) + } + gvk, err = gvkFromApplyConfiguration(ac) + if err != nil { + return nil, err + } + isApplyConfiguration = true + default: + return nil, fmt.Errorf("bug: %T is neither a runtime.Object nor a runtime.ApplyConfiguration", o) + } + + _, isUnstructured := obj.(runtime.Unstructured) + forceDisableProtoBuf := isUnstructured || isApplyConfiguration + + // It's better to do creation work twice than to not let multiple + // people make requests at once + c.mu.RLock() + resourceByType := c.structuredResourceByType + if isUnstructured { + 
resourceByType = c.unstructuredResourceByType + } + r, known := resourceByType[gvk] + c.mu.RUnlock() + + if known { + return r, nil + } + + var isList bool + if runtimeObject, ok := obj.(runtime.Object); ok && meta.IsListType(runtimeObject) { + isList = true + } + + // Initialize a new Client + c.mu.Lock() + defer c.mu.Unlock() + r, err = c.newResource(gvk, isList, forceDisableProtoBuf, isUnstructured) + if err != nil { + return nil, err + } + resourceByType[gvk] = r + return r, err +} + +// getObjMeta returns objMeta containing both type and object metadata and state. +func (c *clientRestResources) getObjMeta(obj any) (*objMeta, error) { + r, err := c.getResource(obj) + if err != nil { + return nil, err + } + objMeta := &objMeta{resourceMeta: r} + + switch o := obj.(type) { + case runtime.Object: + m, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + objMeta.namespace = m.GetNamespace() + objMeta.name = m.GetName() + case applyConfiguration: + objMeta.namespace = ptr.Deref(o.GetNamespace(), "") + objMeta.name = ptr.Deref(o.GetName(), "") + default: + return nil, fmt.Errorf("object %T is neither a runtime.Object nor a runtime.ApplyConfiguration", obj) + } + + return objMeta, nil +} + +// resourceMeta stores state for a Kubernetes type. +type resourceMeta struct { + // client is the rest client used to talk to the apiserver + rest.Interface + // gvk is the GroupVersionKind of the resourceMeta + gvk schema.GroupVersionKind + // mapping is the rest mapping + mapping *meta.RESTMapping +} + +// isNamespaced returns true if the type is namespaced. +func (r *resourceMeta) isNamespaced() bool { + return r.mapping.Scope.Name() != meta.RESTScopeNameRoot +} + +// resource returns the resource name of the type. +func (r *resourceMeta) resource() string { + return r.mapping.Resource.Resource +} + +// objMeta stores type and object information about a Kubernetes type. 
+type objMeta struct { + // resourceMeta contains type information for the object + *resourceMeta + + namespace string + name string +} diff --git a/pkg/client/client_suite_test.go b/pkg/client/client_suite_test.go index 9ffcd17bd3..89cab3f7ed 100644 --- a/pkg/client/client_suite_test.go +++ b/pkg/client/client_suite_test.go @@ -17,31 +17,48 @@ limitations under the License. package client_test import ( + "bytes" + "io" "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/examples/crd/pkg" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) -func TestSource(t *testing.T) { +func TestClient(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Controller Integration Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Client Suite") } -var testenv *envtest.Environment -var cfg *rest.Config -var clientset *kubernetes.Clientset +var ( + testenv *envtest.Environment + cfg *rest.Config + clientset *kubernetes.Clientset -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + // Used by tests to inspect controller and client log messages. + log bytes.Buffer +) + +var _ = BeforeSuite(func() { + // Forwards logs to ginkgo output, and allows tests to inspect logs. + mw := io.MultiWriter(&log, GinkgoWriter) - testenv = &envtest.Environment{} + // Use prefixes to help us tell the source of the log message. 
+ // controller-runtime uses logf + logf.SetLogger(zap.New(zap.WriteTo(mw), zap.UseDevMode(true)).WithName("logf")) + // client-go logs uses klog + klog.SetLogger(zap.New(zap.WriteTo(mw), zap.UseDevMode(true)).WithName("klog")) + + testenv = &envtest.Environment{CRDDirectoryPaths: []string{"./testdata"}} var err error cfg, err = testenv.Start() @@ -50,8 +67,8 @@ var _ = BeforeSuite(func(done Done) { clientset, err = kubernetes.NewForConfig(cfg) Expect(err).NotTo(HaveOccurred()) - close(done) -}, 60) + Expect(pkg.AddToScheme(scheme.Scheme)).NotTo(HaveOccurred()) +}) var _ = AfterSuite(func() { Expect(testenv.Stop()).To(Succeed()) diff --git a/pkg/client/client_test.go b/pkg/client/client_test.go index ad96f4ccb4..42e14771cc 100644 --- a/pkg/client/client_test.go +++ b/pkg/client/client_test.go @@ -17,60 +17,147 @@ limitations under the License. package client_test import ( + "bufio" + "bytes" "context" "encoding/json" + "errors" "fmt" + "reflect" + "strings" "sync/atomic" + "time" - "k8s.io/apimachinery/pkg/types" - - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" + authenticationv1 "k8s.io/api/authentication/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + certificatesv1 "k8s.io/api/certificates/v1" corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client" - + "k8s.io/apimachinery/pkg/types" + appsv1applyconfigurations "k8s.io/client-go/applyconfigurations/apps/v1" + autoscaling1applyconfigurations "k8s.io/client-go/applyconfigurations/autoscaling/v1" + corev1applyconfigurations "k8s.io/client-go/applyconfigurations/core/v1" kscheme "k8s.io/client-go/kubernetes/scheme" -) + "k8s.io/client-go/rest" + "k8s.io/utils/ptr" -const serverSideTimeoutSeconds = 10 + "sigs.k8s.io/controller-runtime/examples/crd/pkg" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" +) -func deleteDeployment(dep *appsv1.Deployment, ns string) { - _, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) +func deleteDeployment(ctx context.Context, dep *appsv1.Deployment, ns string) { + _, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) if err == nil { - err = clientset.AppsV1().Deployments(ns).Delete(dep.Name, &metav1.DeleteOptions{}) + err = clientset.AppsV1().Deployments(ns).Delete(ctx, dep.Name, metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) } } -func deleteNamespace(ns *corev1.Namespace) { - _, err := clientset.CoreV1().Namespaces().Get(ns.Name, metav1.GetOptions{}) - if err == nil { - err = clientset.CoreV1().Namespaces().Delete(ns.Name, &metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) +func deleteNamespace(ctx context.Context, ns *corev1.Namespace) { + ns, err 
:= clientset.CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if err != nil { + return + } + + err = clientset.CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + + // finalize if necessary + pos := -1 + finalizers := ns.Spec.Finalizers + for i, fin := range finalizers { + if fin == "kubernetes" { + pos = i + break + } + } + if pos == -1 { + // no need to finalize + return + } + + // re-get in order to finalize + ns, err = clientset.CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if err != nil { + return + } + + ns.Spec.Finalizers = append(finalizers[:pos], finalizers[pos+1:]...) + _, err = clientset.CoreV1().Namespaces().Finalize(ctx, ns, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + +WAIT_LOOP: + for i := 0; i < 10; i++ { + ns, err = clientset.CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + // success! + return + } + select { + case <-ctx.Done(): + break WAIT_LOOP + // failed to delete in time, see failure below + case <-time.After(100 * time.Millisecond): + // do nothing, try again + } + } + Fail(fmt.Sprintf("timed out waiting for namespace %q to be deleted", ns.Name)) +} + +type mockPatchOption struct { + applied bool +} + +func (o *mockPatchOption) ApplyToPatch(_ *client.PatchOptions) { + o.applied = true +} + +// metaOnlyFromObj returns PartialObjectMetadata from a concrete Go struct that +// returns a concrete *metav1.ObjectMeta from GetObjectMeta (yes, that plays a +// bit fast and loose, but the only other options are serializing and then +// deserializing, or manually calling all the accessor funcs, which are both a bit annoying). 
+func metaOnlyFromObj(obj interface { + runtime.Object + metav1.ObjectMetaAccessor +}, scheme *runtime.Scheme) *metav1.PartialObjectMetadata { + metaObj := metav1.PartialObjectMetadata{} + obj.GetObjectMeta().(*metav1.ObjectMeta).DeepCopyInto(&metaObj.ObjectMeta) + kinds, _, err := scheme.ObjectKinds(obj) + if err != nil { + panic(err) } + metaObj.SetGroupVersionKind(kinds[0]) + return &metaObj } var _ = Describe("Client", func() { var scheme *runtime.Scheme + var depGvk schema.GroupVersionKind var dep *appsv1.Deployment var pod *corev1.Pod var node *corev1.Node + var serviceAccount *corev1.ServiceAccount + var csr *certificatesv1.CertificateSigningRequest var count uint64 = 0 var replicaCount int32 = 2 var ns = "default" - var mergePatch []byte + var errNotCached *cache.ErrResourceNotCached - BeforeEach(func(done Done) { + BeforeEach(func() { atomic.AddUint64(&count, 1) dep = &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("deployment-name-%v", count), Namespace: ns}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("deployment-name-%v", count), Namespace: ns, Labels: map[string]string{"app": fmt.Sprintf("bar-%v", count)}}, Spec: appsv1.DeploymentSpec{ Replicas: &replicaCount, Selector: &metav1.LabelSelector{ @@ -82,6 +169,11 @@ var _ = Describe("Client", func() { }, }, } + depGvk = schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + } // Pod is invalid without a container field in the PodSpec pod = &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("pod-%v", count), Namespace: ns}, @@ -91,22 +183,35 @@ var _ = Describe("Client", func() { ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("node-name-%v", count)}, Spec: corev1.NodeSpec{}, } - scheme = kscheme.Scheme - var err error - mergePatch, err = json.Marshal(map[string]interface{}{ - "metadata": map[string]interface{}{ - "annotations": map[string]interface{}{ - "foo": "bar", - }, + serviceAccount = &corev1.ServiceAccount{ObjectMeta: 
metav1.ObjectMeta{Name: fmt.Sprintf("sa-%v", count), Namespace: ns}} + csr = &certificatesv1.CertificateSigningRequest{ + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("csr-%v", count)}, + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "org.io/my-signer", + Request: []byte(`-----BEGIN CERTIFICATE REQUEST----- +MIIChzCCAW8CAQAwQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0 +eTEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANe06dLX/bDNm6mVEnKdJexcJM6WKMFSt5o6BEdD1+Ki +WyUcvfNgIBbwAZjkF9U1r7+KuDcc6XYFnb6ky1wPo4C+XwcIIx7Nnbf8IdWJukPb +2BCsqO4NCsG6kKFavmH9J3q//nwKUvlQE+AJ2MPuOAZTwZ4KskghiGuS8hyk6/PZ +XH9QhV7Jma43bDzQozd2C7OujRBhLsuP94KSu839RRFWd9ms3XHgTxLxb7nxwZDx +9l7/ZVAObJoQYlHENqs12NCVP4gpJfbcY8/rd+IG4ftcZEmpeO4kKO+d2TpRKQqw +bjCMoAdD5Y43iLTtyql4qRnbMe3nxYG2+1inEryuV/cCAwEAAaAAMA0GCSqGSIb3 +DQEBCwUAA4IBAQDH5hDByRN7wERQtC/o6uc8Y+yhjq9YcBJjjbnD6Vwru5pOdWtx +qfKkkXI5KNOdEhWzLnJyOcWHjj8UoHqI3AjxGC7dTM95eGjxQGUpsUOX8JSd4MiZ +cct4g4BKBj02AGqZLiEgN+PLCYAmEaYU7oZc4OAh6WzMrljNRsj66awMQpw8O1eY +YuBa8vwz8ko8vn/pn7IrFu8cZ+EA3rluJ+budX/QrEGi1hijg27q7/Qr0wNI9f1v +086mLKdqaBTkblXWEvF3WP4CcLNyrSNi4eu+G0fcAgGp1F/Nqh0MuWKSOLprv5Om +U5wwSivyi7vmegHKmblOzNVKA5qPO8zWzqBC +-----END CERTIFICATE REQUEST-----`), + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, }, - }) - Expect(err).NotTo(HaveOccurred()) - - close(done) - }, serverSideTimeoutSeconds) + } + scheme = kscheme.Scheme + }) var delOptions *metav1.DeleteOptions - AfterEach(func(done Done) { + AfterEach(func(ctx SpecContext) { // Cleanup var zero int64 = 0 policy := metav1.DeletePropagationForeground @@ -114,107 +219,191 @@ var _ = Describe("Client", func() { GracePeriodSeconds: &zero, PropagationPolicy: &policy, } - deleteDeployment(dep, ns) - _, err := clientset.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + deleteDeployment(ctx, dep, ns) + _, err := clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) if err == nil { - err = 
clientset.CoreV1().Nodes().Delete(node.Name, delOptions) + err = clientset.CoreV1().Nodes().Delete(ctx, node.Name, *delOptions) Expect(err).NotTo(HaveOccurred()) } - close(done) - }, serverSideTimeoutSeconds) + err = clientset.CoreV1().ServiceAccounts(ns).Delete(ctx, serviceAccount.Name, *delOptions) + Expect(client.IgnoreNotFound(err)).NotTo(HaveOccurred()) + + err = clientset.CertificatesV1().CertificateSigningRequests().Delete(ctx, csr.Name, *delOptions) + Expect(client.IgnoreNotFound(err)).NotTo(HaveOccurred()) + }) + + Describe("WarningHandler", func() { + It("should log warnings with config.WarningHandler, if one is defined", func(ctx SpecContext) { + cache := &fakeReader{} + + testCfg := rest.CopyConfig(cfg) + + var testLog bytes.Buffer + testCfg.WarningHandler = rest.NewWarningWriter(&testLog, rest.WarningWriterOptions{}) + + cl, err := client.New(testCfg, client.Options{Cache: &client.CacheOptions{Reader: cache, DisableFor: []client.Object{&corev1.Namespace{}}}}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "wh-defined"}} + tns, err = clientset.CoreV1().Namespaces().Create(ctx, tns, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(tns).NotTo(BeNil()) + defer deleteNamespace(ctx, tns) + + toCreate := &pkg.ChaosPod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example", + Namespace: tns.Name, + }, + // The ChaosPod CRD does not define Status, so the field is unknown to the API server, + // but field validation is not strict by default, so the API server returns a warning, + // and we need a warning to check whether suppression works. 
+ Status: pkg.ChaosPodStatus{}, + } + err = cl.Create(ctx, toCreate) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + scannerTestLog := bufio.NewScanner(&testLog) + for scannerTestLog.Scan() { + line := scannerTestLog.Text() + if strings.Contains( + line, + "unknown field \"status\"", + ) { + return + } + } + defer Fail("expected to find one API server warning logged the config.WarningHandler") + + scanner := bufio.NewScanner(&log) + for scanner.Scan() { + line := scanner.Text() + if strings.Contains( + line, + "unknown field \"status\"", + ) { + defer Fail("expected to find zero API server warnings in the client log") + break + } + } + }) + }) - // TODO(seans): Cast "cl" as "client" struct from "Client" interface. Then validate the - // instance values for the "client" struct. Describe("New", func() { - It("should return a new Client", func(done Done) { + It("should return a new Client", func() { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - - close(done) }) - It("should fail if the config is nil", func(done Done) { + It("should fail if the config is nil", func() { cl, err := client.New(nil, client.Options{}) Expect(err).To(HaveOccurred()) Expect(cl).To(BeNil()) - - close(done) }) - // TODO(seans): cast as client struct and inspect Scheme - It("should use the provided Scheme if provided", func(done Done) { + It("should use the provided Scheme if provided", func() { cl, err := client.New(cfg, client.Options{Scheme: scheme}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - - close(done) + Expect(cl.Scheme()).ToNot(BeNil()) + Expect(cl.Scheme()).To(Equal(scheme)) }) - // TODO(seans): cast as client struct and inspect Scheme - It("should default the Scheme if not provided", func(done Done) { + It("should default the Scheme if not provided", func() { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) + 
Expect(cl.Scheme()).ToNot(BeNil()) + Expect(cl.Scheme()).To(Equal(kscheme.Scheme)) + }) - close(done) + It("should use the provided Mapper if provided", func() { + mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{}) + cl, err := client.New(cfg, client.Options{Mapper: mapper}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + Expect(cl.RESTMapper()).ToNot(BeNil()) + Expect(cl.RESTMapper()).To(Equal(mapper)) }) - PIt("should use the provided Mapper if provided", func() { + It("should create a Mapper if not provided", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + Expect(cl.RESTMapper()).ToNot(BeNil()) + }) + It("should use the provided reader cache if provided, on get and list", func(ctx SpecContext) { + cache := &fakeReader{} + cl, err := client.New(cfg, client.Options{Cache: &client.CacheOptions{Reader: cache}}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + Expect(cl.Get(ctx, client.ObjectKey{Name: "test"}, &appsv1.Deployment{})).To(Succeed()) + Expect(cl.List(ctx, &appsv1.DeploymentList{})).To(Succeed()) + Expect(cache.Called).To(Equal(2)) }) - // TODO(seans): cast as client struct and inspect Mapper - It("should create a Mapper if not provided", func(done Done) { - cl, err := client.New(cfg, client.Options{}) + It("should propagate ErrResourceNotCached errors", func(ctx SpecContext) { + c := &fakeUncachedReader{} + cl, err := client.New(cfg, client.Options{Cache: &client.CacheOptions{Reader: c}}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) + Expect(errors.As(cl.Get(ctx, client.ObjectKey{Name: "test"}, &appsv1.Deployment{}), &errNotCached)).To(BeTrue()) + Expect(errors.As(cl.List(ctx, &appsv1.DeploymentList{}), &errNotCached)).To(BeTrue()) + Expect(c.Called).To(Equal(2)) + }) - close(done) + It("should not use the provided reader cache if provided, on get and list for uncached GVKs", func(ctx SpecContext) { + cache := 
&fakeReader{} + cl, err := client.New(cfg, client.Options{Cache: &client.CacheOptions{Reader: cache, DisableFor: []client.Object{&corev1.Namespace{}}}}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + Expect(cl.Get(ctx, client.ObjectKey{Name: "default"}, &corev1.Namespace{})).To(Succeed()) + Expect(cl.List(ctx, &corev1.NamespaceList{})).To(Succeed()) + Expect(cache.Called).To(Equal(0)) }) }) Describe("Create", func() { Context("with structured objects", func() { - It("should create a new object from a go struct", func(done Done) { + It("should create a new object from a go struct", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) By("creating the object") - err = cl.Create(context.TODO(), dep) + err = cl.Create(ctx, dep) Expect(err).NotTo(HaveOccurred()) - actual, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) By("writing the result back to the go struct") Expect(dep).To(Equal(actual)) - - close(done) }) - It("should create a new object non-namespace object from a go struct", func(done Done) { + It("should create a new object non-namespace object from a go struct", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) By("creating the object") - err = cl.Create(context.TODO(), node) + err = cl.Create(ctx, node) Expect(err).NotTo(HaveOccurred()) - actual, err := clientset.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + actual, err := clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) By("writing the result back to the go struct") Expect(node).To(Equal(actual)) - - close(done) }) - It("should fail if the object already exists", 
func(done Done) { + It("should fail if the object already exists", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) @@ -222,36 +411,32 @@ var _ = Describe("Client", func() { old := dep.DeepCopy() By("creating the object") - err = cl.Create(context.TODO(), dep) + err = cl.Create(ctx, dep) Expect(err).NotTo(HaveOccurred()) - actual, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) By("creating the object a second time") - err = cl.Create(context.TODO(), old) + err = cl.Create(ctx, old) Expect(err).To(HaveOccurred()) Expect(apierrors.IsAlreadyExists(err)).To(BeTrue()) - - close(done) }) - It("should fail if the object does not pass server-side validation", func(done Done) { + It("should fail if the object does not pass server-side validation", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) By("creating the pod, since required field Containers is empty") - err = cl.Create(context.TODO(), pod) + err = cl.Create(ctx, pod) Expect(err).To(HaveOccurred()) // TODO(seans): Add test to validate the returned error. Problems currently with // different returned error locally versus travis. 
+ }) - close(done) - }, serverSideTimeoutSeconds) - - It("should fail if the object cannot be mapped to a GVK", func() { + It("should fail if the object cannot be mapped to a GVK", func(ctx SpecContext) { By("creating client with empty Scheme") emptyScheme := runtime.NewScheme() cl, err := client.New(cfg, client.Options{Scheme: emptyScheme}) @@ -259,7 +444,7 @@ var _ = Describe("Client", func() { Expect(cl).NotTo(BeNil()) By("creating the object fails") - err = cl.Create(context.TODO(), dep) + err = cl.Create(ctx, dep) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) }) @@ -270,27 +455,40 @@ var _ = Describe("Client", func() { }) Context("with the DryRun option", func() { - It("should not create a new object", func(done Done) { - cl, err := client.New(cfg, client.Options{}) + It("should not create a new object, global option", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{DryRun: ptr.To(true)}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) By("creating the object (with DryRun)") - err = cl.Create(context.TODO(), dep, client.CreateDryRunAll) + err = cl.Create(ctx, dep) Expect(err).NotTo(HaveOccurred()) - actual, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).To(HaveOccurred()) Expect(apierrors.IsNotFound(err)).To(BeTrue()) Expect(actual).To(Equal(&appsv1.Deployment{})) + }) + + It("should not create a new object, inline option", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) - close(done) + By("creating the object (with DryRun)") + err = cl.Create(ctx, dep, client.DryRunAll) + Expect(err).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + 
Expect(apierrors.IsNotFound(err)).To(BeTrue()) + Expect(actual).To(Equal(&appsv1.Deployment{})) }) }) }) Context("with unstructured objects", func() { - It("should create a new object from a go struct", func(done Done) { + It("should create a new object from a go struct", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) @@ -305,16 +503,15 @@ var _ = Describe("Client", func() { }) By("creating the object") - err = cl.Create(context.TODO(), u) + err = cl.Create(ctx, u) Expect(err).NotTo(HaveOccurred()) - actual, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) - close(done) }) - It("should create a new non-namespace object ", func(done Done) { + It("should create a new non-namespace object ", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) @@ -329,10 +526,10 @@ var _ = Describe("Client", func() { }) By("creating the object") - err = cl.Create(context.TODO(), node) + err = cl.Create(ctx, node) Expect(err).NotTo(HaveOccurred()) - actual, err := clientset.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + actual, err := clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) au := &unstructured.Unstructured{} @@ -341,11 +538,9 @@ var _ = Describe("Client", func() { By("writing the result back to the go struct") Expect(u).To(Equal(au)) - - close(done) }) - It("should fail if the object already exists", func(done Done) { + It("should fail if the object already exists", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) @@ -353,9 +548,9 @@ var _ = Describe("Client", func() { old 
:= dep.DeepCopy() By("creating the object") - err = cl.Create(context.TODO(), dep) + err = cl.Create(ctx, dep) Expect(err).NotTo(HaveOccurred()) - actual, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) @@ -369,14 +564,12 @@ var _ = Describe("Client", func() { }) By("creating the object a second time") - err = cl.Create(context.TODO(), u) + err = cl.Create(ctx, u) Expect(err).To(HaveOccurred()) Expect(apierrors.IsAlreadyExists(err)).To(BeTrue()) - - close(done) }) - It("should fail if the object does not pass server-side validation", func(done Done) { + It("should fail if the object does not pass server-side validation", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) @@ -389,18 +582,26 @@ var _ = Describe("Client", func() { Version: "v1", Kind: "Pod", }) - err = cl.Create(context.TODO(), u) + err = cl.Create(ctx, u) Expect(err).To(HaveOccurred()) // TODO(seans): Add test to validate the returned error. Problems currently with // different returned error locally versus travis. 
+ }) + + }) - close(done) - }, serverSideTimeoutSeconds) + Context("with metadata objects", func() { + It("should fail with an error", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + obj := metaOnlyFromObj(dep, scheme) + Expect(cl.Create(ctx, obj)).NotTo(Succeed()) + }) }) Context("with the DryRun option", func() { - It("should not create a new object from a go struct", func(done Done) { + It("should not create a new object from a go struct", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) @@ -415,76 +616,86 @@ var _ = Describe("Client", func() { }) By("creating the object") - err = cl.Create(context.TODO(), u, client.CreateDryRunAll) + err = cl.Create(ctx, u, client.DryRunAll) Expect(err).NotTo(HaveOccurred()) - actual, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).To(HaveOccurred()) Expect(apierrors.IsNotFound(err)).To(BeTrue()) Expect(actual).To(Equal(&appsv1.Deployment{})) - - close(done) }) }) }) Describe("Update", func() { Context("with structured objects", func() { - It("should update an existing object from a go struct", func(done Done) { + It("should update an existing object from a go struct", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("updating the Deployment") dep.Annotations = map[string]string{"foo": "bar"} - err = cl.Update(context.TODO(), dep) + err = cl.Update(ctx, dep) Expect(err).NotTo(HaveOccurred()) By("validating updated Deployment has new annotation") - actual, err 
:= clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) Expect(actual.Annotations["foo"]).To(Equal("bar")) + }) + + It("should update and preserve type information", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the Deployment") + dep.SetGroupVersionKind(depGvk) + err = cl.Update(ctx, dep) + Expect(err).NotTo(HaveOccurred()) - close(done) + By("validating updated Deployment has type information") + Expect(dep.GroupVersionKind()).To(Equal(depGvk)) }) - It("should update an existing object non-namespace object from a go struct", func(done Done) { + It("should update an existing object non-namespace object from a go struct", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - node, err := clientset.CoreV1().Nodes().Create(node) + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("updating the object") node.Annotations = map[string]string{"foo": "bar"} - err = cl.Update(context.TODO(), node) + err = cl.Update(ctx, node) Expect(err).NotTo(HaveOccurred()) By("validate updated Node had new annotation") - actual, err := clientset.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + actual, err := clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) Expect(actual.Annotations["foo"]).To(Equal("bar")) - - close(done) }) - It("should fail if the object does not exists", func(done Done) { + It("should fail if the 
object does not exist", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) By("updating non-existent object") - err = cl.Update(context.TODO(), dep) + err = cl.Update(ctx, dep) Expect(err).To(HaveOccurred()) - - close(done) }) PIt("should fail if the object does not pass server-side validation", func() { @@ -495,7 +706,7 @@ var _ = Describe("Client", func() { }) - It("should fail if the object cannot be mapped to a GVK", func(done Done) { + It("should fail if the object cannot be mapped to a GVK", func(ctx SpecContext) { By("creating client with empty Scheme") emptyScheme := runtime.NewScheme() cl, err := client.New(cfg, client.Options{Scheme: emptyScheme}) @@ -503,16 +714,14 @@ var _ = Describe("Client", func() { Expect(cl).NotTo(BeNil()) By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("updating the Deployment") dep.Annotations = map[string]string{"foo": "bar"} - err = cl.Update(context.TODO(), dep) + err = cl.Update(ctx, dep) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) - - close(done) }) PIt("should fail if the GVK cannot be mapped to a Resource", func() { @@ -520,13 +729,13 @@ var _ = Describe("Client", func() { }) }) Context("with unstructured objects", func() { - It("should update an existing object from a go struct", func(done Done) { + It("should update an existing object from a go struct", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) 
By("updating the Deployment") @@ -538,24 +747,43 @@ var _ = Describe("Client", func() { Version: "v1", }) u.SetAnnotations(map[string]string{"foo": "bar"}) - err = cl.Update(context.TODO(), u) + err = cl.Update(ctx, u) Expect(err).NotTo(HaveOccurred()) By("validating updated Deployment has new annotation") - actual, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) Expect(actual.Annotations["foo"]).To(Equal("bar")) + }) + + It("should update and preserve type information", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the Deployment") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + u.SetGroupVersionKind(depGvk) + u.SetAnnotations(map[string]string{"foo": "bar"}) + err = cl.Update(ctx, u) + Expect(err).NotTo(HaveOccurred()) - close(done) + By("validating updated Deployment has type information") + Expect(u.GroupVersionKind()).To(Equal(depGvk)) }) - It("should update an existing object non-namespace object from a go struct", func(done Done) { + It("should update an existing object non-namespace object from a go struct", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - node, err := clientset.CoreV1().Nodes().Create(node) + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("updating the object") @@ -567,18 +795,16 @@ var _ = Describe("Client", func() { Version: "v1", }) u.SetAnnotations(map[string]string{"foo": "bar"}) - err = 
cl.Update(context.TODO(), u) + err = cl.Update(ctx, u) Expect(err).NotTo(HaveOccurred()) By("validate updated Node had new annotation") - actual, err := clientset.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + actual, err := clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) Expect(actual.Annotations["foo"]).To(Equal("bar")) - - close(done) }) - It("should fail if the object does not exists", func(done Done) { + It("should fail if the object does not exist", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) @@ -586,857 +812,1710 @@ var _ = Describe("Client", func() { By("updating non-existent object") u := &unstructured.Unstructured{} Expect(scheme.Convert(dep, u, nil)).To(Succeed()) - u.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "apps", - Kind: "Deployment", - Version: "v1", - }) - err = cl.Update(context.TODO(), dep) + u.SetGroupVersionKind(depGvk) + err = cl.Update(ctx, dep) Expect(err).To(HaveOccurred()) + }) + }) + Context("with metadata objects", func() { + It("should fail with an error", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + obj := metaOnlyFromObj(dep, scheme) - close(done) + Expect(cl.Update(ctx, obj)).NotTo(Succeed()) }) }) }) - Describe("StatusClient", func() { - Context("with structured objects", func() { - It("should update status of an existing object", func(done Done) { + Describe("Patch", func() { + Context("Metadata Client", func() { + It("should merge patch with options", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) 
Expect(err).NotTo(HaveOccurred()) - By("updating the status of Deployment") - dep.Status.Replicas = 1 - err = cl.Status().Update(context.TODO(), dep) - Expect(err).NotTo(HaveOccurred()) + metadata := metaOnlyFromObj(dep, scheme) + if metadata.Labels == nil { + metadata.Labels = make(map[string]string) + } + metadata.Labels["foo"] = "bar" - By("validating updated Deployment has new status") - actual, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) + testOption := &mockPatchOption{} + Expect(cl.Patch(ctx, metadata, client.Merge, testOption)).To(Succeed()) + + By("validating that patched metadata has new labels") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) - Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) + Expect(actual.Labels["foo"]).To(Equal("bar")) - close(done) + By("validating patch options were applied") + Expect(testOption.applied).To(BeTrue()) }) + }) + }) - It("should not update spec of an existing object", func(done Done) { + Describe("Apply", func() { + Context("Unstructured Client", func() { + It("should create and update a configMap using SSA", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + data := map[string]any{ + "some-key": "some-value", + } + obj := &unstructured.Unstructured{Object: map[string]any{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]any{ + "name": "test-configmap", + "namespace": "default", + }, + "data": data, + }} + + err = cl.Apply(ctx, client.ApplyConfigurationFromUnstructured(obj), &client.ApplyOptions{FieldManager: "test-manager"}) Expect(err).NotTo(HaveOccurred()) - By("updating the spec and status of Deployment") - var rc int32 = 1 - dep.Status.Replicas = 1 - dep.Spec.Replicas = 
&rc - err = cl.Status().Update(context.TODO(), dep) + cm, err := clientset.CoreV1().ConfigMaps(obj.GetNamespace()).Get(ctx, obj.GetName(), metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - By("validating updated Deployment has new status and unchanged spec") - actual, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) + actualData := map[string]any{} + for k, v := range cm.Data { + actualData[k] = v + } + + Expect(actualData).To(BeComparableTo(data)) + Expect(actualData).To(BeComparableTo(obj.Object["data"])) + + data = map[string]any{ + "a-new-key": "a-new-value", + } + obj.Object["data"] = data + unstructured.RemoveNestedField(obj.Object, "metadata", "managedFields") + + err = cl.Apply(ctx, client.ApplyConfigurationFromUnstructured(obj), &client.ApplyOptions{FieldManager: "test-manager"}) Expect(err).NotTo(HaveOccurred()) - Expect(actual).NotTo(BeNil()) - Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) - Expect(*actual.Spec.Replicas).To(BeEquivalentTo(replicaCount)) - close(done) + cm, err = clientset.CoreV1().ConfigMaps(obj.GetNamespace()).Get(ctx, obj.GetName(), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + actualData = map[string]any{} + for k, v := range cm.Data { + actualData[k] = v + } + + Expect(actualData).To(BeComparableTo(data)) + Expect(actualData).To(BeComparableTo(obj.Object["data"])) }) + }) - It("should update an existing object non-namespace object", func(done Done) { + Context("Structured Client", func() { + It("should create and update a configMap using SSA", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - node, err := clientset.CoreV1().Nodes().Create(node) + data := map[string]string{ + "some-key": "some-value", + } + obj := corev1applyconfigurations. + ConfigMap("test-configmap", "default"). 
+ WithData(data) + + err = cl.Apply(ctx, obj, &client.ApplyOptions{FieldManager: "test-manager"}) Expect(err).NotTo(HaveOccurred()) - By("updating status of the object") - node.Status.Phase = corev1.NodeRunning - err = cl.Status().Update(context.TODO(), node) + cm, err := clientset.CoreV1().ConfigMaps(ptr.Deref(obj.GetNamespace(), "")).Get(ctx, ptr.Deref(obj.GetName(), ""), metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - By("validate updated Node had new annotation") - actual, err := clientset.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + Expect(cm.Data).To(BeComparableTo(data)) + Expect(cm.Data).To(BeComparableTo(obj.Data)) + + data = map[string]string{ + "a-new-key": "a-new-value", + } + obj.Data = data + + err = cl.Apply(ctx, obj, &client.ApplyOptions{FieldManager: "test-manager"}) + Expect(err).NotTo(HaveOccurred()) + + cm, err = clientset.CoreV1().ConfigMaps(ptr.Deref(obj.GetNamespace(), "")).Get(ctx, ptr.Deref(obj.GetName(), ""), metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - Expect(actual).NotTo(BeNil()) - Expect(actual.Status.Phase).To(Equal(corev1.NodeRunning)) - close(done) + Expect(cm.Data).To(BeComparableTo(data)) + Expect(cm.Data).To(BeComparableTo(obj.Data)) }) + }) + }) - It("should fail if the object does not exists", func(done Done) { + Describe("SubResourceClient", func() { + Context("with structured objects", func() { + It("should be able to read the Scale subresource", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("updating status of a non-existent object") - err = cl.Status().Update(context.TODO(), dep) - Expect(err).To(HaveOccurred()) + By("Creating a deployment") + dep, err := clientset.AppsV1().Deployments(dep.Namespace).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) - close(done) + By("reading the scale subresource") + scale := &autoscalingv1.Scale{} + err = cl.SubResource("scale").Get(ctx, dep, 
scale, &client.SubResourceGetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(scale.Spec.Replicas).To(Equal(*dep.Spec.Replicas)) }) - - It("should fail if the object cannot be mapped to a GVK", func(done Done) { - By("creating client with empty Scheme") - emptyScheme := runtime.NewScheme() - cl, err := client.New(cfg, client.Options{Scheme: emptyScheme}) + It("should be able to create ServiceAccount tokens", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) - Expect(err).NotTo(HaveOccurred()) + By("Creating the serviceAccount") + _, err = clientset.CoreV1().ServiceAccounts(serviceAccount.Namespace).Create(ctx, serviceAccount, metav1.CreateOptions{}) + Expect((err)).NotTo(HaveOccurred()) - By("updating status of the Deployment") - dep.Status.Replicas = 1 - err = cl.Status().Update(context.TODO(), dep) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) + token := &authenticationv1.TokenRequest{} + err = cl.SubResource("token").Create(ctx, serviceAccount, token, &client.SubResourceCreateOptions{}) + Expect(err).NotTo(HaveOccurred()) - close(done) + Expect(token.Status.Token).NotTo(Equal("")) }) - PIt("should fail if the GVK cannot be mapped to a Resource", func() { + It("should be able to create Pod evictions", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) - }) + // Make the pod valid + pod.Spec.Containers = []corev1.Container{{Name: "foo", Image: "busybox"}} - PIt("should fail if an API does not implement Status subresource", func() { + By("Creating the pod") + pod, err = clientset.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating the eviction") + eviction := 
&policyv1.Eviction{ + DeleteOptions: &metav1.DeleteOptions{GracePeriodSeconds: ptr.To(int64(0))}, + } + err = cl.SubResource("eviction").Create(ctx, pod, eviction, &client.SubResourceCreateOptions{}) + Expect((err)).NotTo(HaveOccurred()) + By("Asserting the pod is gone") + _, err = clientset.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) }) - }) - Context("with unstructured objects", func() { - It("should update status of an existing object", func(done Done) { + It("should be able to create Pod bindings", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) - Expect(err).NotTo(HaveOccurred()) + // Make the pod valid + pod.Spec.Containers = []corev1.Container{{Name: "foo", Image: "busybox"}} - By("updating the status of Deployment") - u := &unstructured.Unstructured{} - dep.Status.Replicas = 1 - Expect(scheme.Convert(dep, u, nil)).To(Succeed()) - err = cl.Status().Update(context.TODO(), u) + By("Creating the pod") + pod, err = clientset.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("validating updated Deployment has new status") - actual, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(actual).NotTo(BeNil()) - Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) + By("Creating the binding") + binding := &corev1.Binding{ + Target: corev1.ObjectReference{Name: node.Name}, + } + err = cl.SubResource("binding").Create(ctx, pod, binding, &client.SubResourceCreateOptions{}) + Expect((err)).NotTo(HaveOccurred()) - close(done) + By("Asserting the pod is bound") + pod, err = clientset.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + 
Expect(pod.Spec.NodeName).To(Equal(node.Name)) }) - It("should not update spec of an existing object", func(done Done) { + It("should be able to approve CSRs", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + By("Creating the CSR") + csr, err := clientset.CertificatesV1().CertificateSigningRequests().Create(ctx, csr, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("updating the spec and status of Deployment") - u := &unstructured.Unstructured{} - var rc int32 = 1 - dep.Status.Replicas = 1 - dep.Spec.Replicas = &rc - Expect(scheme.Convert(dep, u, nil)).To(Succeed()) - err = cl.Status().Update(context.TODO(), u) + By("Approving the CSR") + csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{ + Type: certificatesv1.CertificateApproved, + Status: corev1.ConditionTrue, + }) + err = cl.SubResource("approval").Update(ctx, csr, &client.SubResourceUpdateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("validating updated Deployment has new status and unchanged spec") - actual, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) + By("Asserting the CSR is approved") + csr, err = clientset.CertificatesV1().CertificateSigningRequests().Get(ctx, csr.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - Expect(actual).NotTo(BeNil()) - Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) - Expect(*actual.Spec.Replicas).To(BeEquivalentTo(replicaCount)) - - close(done) + Expect(csr.Status.Conditions[0].Type).To(Equal(certificatesv1.CertificateApproved)) + Expect(csr.Status.Conditions[0].Status).To(Equal(corev1.ConditionTrue)) }) - It("should update an existing object non-namespace object", func(done Done) { + It("should be able to approve CSRs using Patch", func(ctx SpecContext) { cl, err := 
client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - node, err := clientset.CoreV1().Nodes().Create(node) + By("Creating the CSR") + csr, err := clientset.CertificatesV1().CertificateSigningRequests().Create(ctx, csr, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("updating status of the object") - u := &unstructured.Unstructured{} - node.Status.Phase = corev1.NodeRunning - Expect(scheme.Convert(node, u, nil)).To(Succeed()) - err = cl.Status().Update(context.TODO(), u) + By("Approving the CSR") + patch := client.MergeFrom(csr.DeepCopy()) + csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{ + Type: certificatesv1.CertificateApproved, + Status: corev1.ConditionTrue, + }) + err = cl.SubResource("approval").Patch(ctx, csr, patch, &client.SubResourcePatchOptions{}) Expect(err).NotTo(HaveOccurred()) - By("validate updated Node had new annotation") - actual, err := clientset.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{}) + By("Asserting the CSR is approved") + csr, err = clientset.CertificatesV1().CertificateSigningRequests().Get(ctx, csr.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - Expect(actual).NotTo(BeNil()) - Expect(actual.Status.Phase).To(Equal(corev1.NodeRunning)) - - close(done) + Expect(csr.Status.Conditions[0].Type).To(Equal(certificatesv1.CertificateApproved)) + Expect(csr.Status.Conditions[0].Status).To(Equal(corev1.ConditionTrue)) }) - It("should fail if the object does not exists", func(done Done) { + It("should be able to update the scale subresource", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("updating status of a non-existent object") - u := &unstructured.Unstructured{} - Expect(scheme.Convert(dep, u, nil)).To(Succeed()) - err = cl.Status().Update(context.TODO(), u) - Expect(err).To(HaveOccurred()) + By("Creating a deployment") + 
dep, err := clientset.AppsV1().Deployments(dep.Namespace).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the scale subresource") + replicaCount := *dep.Spec.Replicas + scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: replicaCount}} + err = cl.SubResource("scale").Update(ctx, dep, client.WithSubResourceBody(scale), &client.SubResourceUpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) - close(done) + By("Asserting replicas got updated") + dep, err = clientset.AppsV1().Deployments(dep.Namespace).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(*dep.Spec.Replicas).To(Equal(replicaCount)) }) - PIt("should fail if the GVK cannot be mapped to a Resource", func() { + It("should be able to patch the scale subresource", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) - }) + By("Creating a deployment") + dep, err := clientset.AppsV1().Deployments(dep.Namespace).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) - PIt("should fail if an API does not implement Status subresource", func() { + By("Updating the scale subresurce") + replicaCount := *dep.Spec.Replicas + patch := client.MergeFrom(&autoscalingv1.Scale{}) + scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: replicaCount}} + err = cl.SubResource("scale").Patch(ctx, dep, patch, client.WithSubResourceBody(scale), &client.SubResourcePatchOptions{}) + Expect(err).NotTo(HaveOccurred()) + By("Asserting replicas got updated") + dep, err = clientset.AppsV1().Deployments(dep.Namespace).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(*dep.Spec.Replicas).To(Equal(replicaCount)) }) - }) - }) - - Describe("Delete", func() { - Context("with structured objects", func() { - It("should delete an existing object from a go struct", func(done Done) { + It("should be 
able to apply the scale subresource", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + By("Creating a deployment") + dep, err := clientset.AppsV1().Deployments(dep.Namespace).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) + replicaCount := *dep.Spec.Replicas + 1 - By("deleting the Deployment") - depName := dep.Name - err = cl.Delete(context.TODO(), dep) + By("Applying the scale subresurce") + deploymentAC, err := appsv1applyconfigurations.ExtractDeployment(dep, "foo") + Expect(err).NotTo(HaveOccurred()) + scale := autoscaling1applyconfigurations.Scale(). + WithSpec(autoscaling1applyconfigurations.ScaleSpec().WithReplicas(replicaCount)) + err = cl.SubResource("scale").Apply(ctx, deploymentAC, + &client.SubResourceApplyOptions{SubResourceBody: scale}, + client.FieldOwner("foo"), + client.ForceOwnership, + ) Expect(err).NotTo(HaveOccurred()) - By("validating the Deployment no longer exists") - _, err = clientset.AppsV1().Deployments(ns).Get(depName, metav1.GetOptions{}) - Expect(err).To(HaveOccurred()) - - close(done) + By("Asserting replicas got updated") + dep, err = clientset.AppsV1().Deployments(dep.Namespace).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(*dep.Spec.Replicas).To(Equal(replicaCount)) }) + }) - It("should delete an existing object non-namespace object from a go struct", func(done Done) { - cl, err := client.New(cfg, client.Options{}) + Context("with unstructured objects", func() { + It("should be able to read the Scale subresource", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{Scheme: runtime.NewScheme()}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Node") - node, err := clientset.CoreV1().Nodes().Create(node) + By("Creating a deployment") 
+ dep, err := clientset.AppsV1().Deployments(dep.Namespace).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + dep.APIVersion = appsv1.SchemeGroupVersion.String() + dep.Kind = reflect.TypeOf(dep).Elem().Name() + depUnstructured, err := toUnstructured(dep) Expect(err).NotTo(HaveOccurred()) - By("deleting the Node") - nodeName := node.Name - err = cl.Delete(context.TODO(), node) + By("reading the scale subresource") + scale := &unstructured.Unstructured{} + scale.SetAPIVersion("autoscaling/v1") + scale.SetKind("Scale") + err = cl.SubResource("scale").Get(ctx, depUnstructured, scale) Expect(err).NotTo(HaveOccurred()) - By("validating the Node no longer exists") - _, err = clientset.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) - Expect(err).To(HaveOccurred()) + val, found, err := unstructured.NestedInt64(scale.UnstructuredContent(), "spec", "replicas") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + Expect(int32(val)).To(Equal(*dep.Spec.Replicas)) + }) + It("should be able to create ServiceAccount tokens", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{Scheme: runtime.NewScheme()}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("Creating the serviceAccount") + _, err = clientset.CoreV1().ServiceAccounts(serviceAccount.Namespace).Create(ctx, serviceAccount, metav1.CreateOptions{}) + Expect((err)).NotTo(HaveOccurred()) + + serviceAccount.APIVersion = "v1" + serviceAccount.Kind = "ServiceAccount" + serviceAccountUnstructured, err := toUnstructured(serviceAccount) + Expect(err).NotTo(HaveOccurred()) - close(done) + token := &unstructured.Unstructured{} + token.SetAPIVersion("authentication.k8s.io/v1") + token.SetKind("TokenRequest") + err = cl.SubResource("token").Create(ctx, serviceAccountUnstructured, token) + Expect(err).NotTo(HaveOccurred()) + Expect(token.GetAPIVersion()).To(Equal("authentication.k8s.io/v1")) + Expect(token.GetKind()).To(Equal("TokenRequest")) + + val, 
found, err := unstructured.NestedString(token.UnstructuredContent(), "status", "token") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + Expect(val).NotTo(Equal("")) }) - It("should fail if the object does not exists", func(done Done) { - cl, err := client.New(cfg, client.Options{}) + It("should be able to create Pod evictions", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{Scheme: runtime.NewScheme()}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("Deleting node before it is ever created") - err = cl.Delete(context.TODO(), node) - Expect(err).To(HaveOccurred()) + // Make the pod valid + pod.Spec.Containers = []corev1.Container{{Name: "foo", Image: "busybox"}} - close(done) - }) + By("Creating the pod") + pod, err = clientset.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) - PIt("should fail if the object doesn't have meta", func() { + pod.APIVersion = "v1" + pod.Kind = "Pod" + podUnstructured, err := toUnstructured(pod) + Expect(err).NotTo(HaveOccurred()) + + By("Creating the eviction") + eviction := &unstructured.Unstructured{} + eviction.SetAPIVersion("policy/v1") + eviction.SetKind("Eviction") + err = unstructured.SetNestedField(eviction.UnstructuredContent(), int64(0), "deleteOptions", "gracePeriodSeconds") + Expect(err).NotTo(HaveOccurred()) + err = cl.SubResource("eviction").Create(ctx, podUnstructured, eviction) + Expect(err).NotTo(HaveOccurred()) + Expect(eviction.GetAPIVersion()).To(Equal("policy/v1")) + Expect(eviction.GetKind()).To(Equal("Eviction")) + By("Asserting the pod is gone") + _, err = clientset.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) }) - It("should fail if the object cannot be mapped to a GVK", func(done Done) { - By("creating client with empty Scheme") - emptyScheme := runtime.NewScheme() - cl, err := client.New(cfg, client.Options{Scheme: 
emptyScheme}) + It("should be able to create Pod bindings", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{Scheme: runtime.NewScheme()}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + // Make the pod valid + pod.Spec.Containers = []corev1.Container{{Name: "foo", Image: "busybox"}} + + By("Creating the pod") + pod, err = clientset.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("deleting the Deployment fails") - err = cl.Delete(context.TODO(), dep) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) + pod.APIVersion = "v1" + pod.Kind = "Pod" + podUnstructured, err := toUnstructured(pod) + Expect(err).NotTo(HaveOccurred()) - close(done) - }) + By("Creating the binding") + binding := &unstructured.Unstructured{} + binding.SetAPIVersion("v1") + binding.SetKind("Binding") + err = unstructured.SetNestedField(binding.UnstructuredContent(), node.Name, "target", "name") + Expect(err).NotTo(HaveOccurred()) - PIt("should fail if the GVK cannot be mapped to a Resource", func() { + err = cl.SubResource("binding").Create(ctx, podUnstructured, binding) + Expect((err)).NotTo(HaveOccurred()) + Expect(binding.GetAPIVersion()).To(Equal("v1")) + Expect(binding.GetKind()).To(Equal("Binding")) + By("Asserting the pod is bound") + pod, err = clientset.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(pod.Spec.NodeName).To(Equal(node.Name)) }) - }) - Context("with unstructured objects", func() { - It("should delete an existing object from a go struct", func(done Done) { - cl, err := client.New(cfg, client.Options{}) + + It("should be able to approve CSRs", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{Scheme: runtime.NewScheme()}) 
Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + By("Creating the CSR") + csr, err := clientset.CertificatesV1().CertificateSigningRequests().Create(ctx, csr, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("deleting the Deployment") - depName := dep.Name - u := &unstructured.Unstructured{} - Expect(scheme.Convert(dep, u, nil)).To(Succeed()) - u.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "apps", - Kind: "Deployment", - Version: "v1", + By("Approving the CSR") + csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{ + Type: certificatesv1.CertificateApproved, + Status: corev1.ConditionTrue, }) - err = cl.Delete(context.TODO(), u) + csr.APIVersion = "certificates.k8s.io/v1" + csr.Kind = "CertificateSigningRequest" + csrUnstructured, err := toUnstructured(csr) Expect(err).NotTo(HaveOccurred()) - By("validating the Deployment no longer exists") - _, err = clientset.AppsV1().Deployments(ns).Get(depName, metav1.GetOptions{}) - Expect(err).To(HaveOccurred()) + err = cl.SubResource("approval").Update(ctx, csrUnstructured) + Expect(err).NotTo(HaveOccurred()) + Expect(csrUnstructured.GetAPIVersion()).To(Equal("certificates.k8s.io/v1")) + Expect(csrUnstructured.GetKind()).To(Equal("CertificateSigningRequest")) - close(done) + By("Asserting the CSR is approved") + csr, err = clientset.CertificatesV1().CertificateSigningRequests().Get(ctx, csr.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(csr.Status.Conditions[0].Type).To(Equal(certificatesv1.CertificateApproved)) + Expect(csr.Status.Conditions[0].Status).To(Equal(corev1.ConditionTrue)) }) - It("should delete an existing object non-namespace object from a go struct", func(done Done) { - cl, err := client.New(cfg, client.Options{}) + It("should be able to approve CSRs using Patch", func(ctx SpecContext) { + cl, err 
:= client.New(cfg, client.Options{Scheme: runtime.NewScheme()}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Node") - node, err := clientset.CoreV1().Nodes().Create(node) + By("Creating the CSR") + csr, err := clientset.CertificatesV1().CertificateSigningRequests().Create(ctx, csr, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("deleting the Node") - nodeName := node.Name - u := &unstructured.Unstructured{} - Expect(scheme.Convert(node, u, nil)).To(Succeed()) - u.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Kind: "Node", - Version: "v1", + By("Approving the CSR") + patch := client.MergeFrom(csr.DeepCopy()) + csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{ + Type: certificatesv1.CertificateApproved, + Status: corev1.ConditionTrue, }) - err = cl.Delete(context.TODO(), u) + csr.APIVersion = "certificates.k8s.io/v1" + csr.Kind = "CertificateSigningRequest" + csrUnstructured, err := toUnstructured(csr) Expect(err).NotTo(HaveOccurred()) - By("validating the Node no longer exists") - _, err = clientset.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) - Expect(err).To(HaveOccurred()) + err = cl.SubResource("approval").Patch(ctx, csrUnstructured, patch) + Expect(err).NotTo(HaveOccurred()) + Expect(csrUnstructured.GetAPIVersion()).To(Equal("certificates.k8s.io/v1")) + Expect(csrUnstructured.GetKind()).To(Equal("CertificateSigningRequest")) - close(done) + By("Asserting the CSR is approved") + csr, err = clientset.CertificatesV1().CertificateSigningRequests().Get(ctx, csr.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(csr.Status.Conditions[0].Type).To(Equal(certificatesv1.CertificateApproved)) + Expect(csr.Status.Conditions[0].Status).To(Equal(corev1.ConditionTrue)) }) - It("should fail if the object does not exists", func(done Done) { - cl, err := client.New(cfg, client.Options{}) + It("should be able to update the 
scale subresource", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{Scheme: runtime.NewScheme()}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("Deleting node before it is ever created") - u := &unstructured.Unstructured{} - Expect(scheme.Convert(node, u, nil)).To(Succeed()) - u.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Kind: "Node", - Version: "v1", - }) - err = cl.Delete(context.TODO(), node) - Expect(err).To(HaveOccurred()) + By("Creating a deployment") + dep, err := clientset.AppsV1().Deployments(dep.Namespace).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + dep.APIVersion = appsv1.SchemeGroupVersion.String() + dep.Kind = "Deployment" //nolint:goconst + depUnstructured, err := toUnstructured(dep) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the scale subresurce") + replicaCount := *dep.Spec.Replicas + scale := &unstructured.Unstructured{} + scale.SetAPIVersion("autoscaling/v1") + scale.SetKind("Scale") + Expect(unstructured.SetNestedField(scale.Object, int64(replicaCount), "spec", "replicas")).NotTo(HaveOccurred()) + err = cl.SubResource("scale").Update(ctx, depUnstructured, client.WithSubResourceBody(scale)) + Expect(err).NotTo(HaveOccurred()) + Expect(scale.GetAPIVersion()).To(Equal("autoscaling/v1")) + Expect(scale.GetKind()).To(Equal("Scale")) - close(done) + By("Asserting replicas got updated") + dep, err = clientset.AppsV1().Deployments(dep.Namespace).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(*dep.Spec.Replicas).To(Equal(replicaCount)) }) - }) - }) - Describe("Patch", func() { - Context("with structured objects", func() { - It("should patch an existing object from a go struct", func(done Done) { - cl, err := client.New(cfg, client.Options{}) + It("should be able to patch the scale subresource", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{Scheme: runtime.NewScheme()}) 
Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + By("Creating a deployment") + dep, err := clientset.AppsV1().Deployments(dep.Namespace).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + dep.APIVersion = "apps/v1" + dep.Kind = "Deployment" + depUnstructured, err := toUnstructured(dep) Expect(err).NotTo(HaveOccurred()) - By("patching the Deployment") - err = cl.Patch(context.TODO(), dep, client.ConstantPatch(types.MergePatchType, mergePatch)) + By("Updating the scale subresurce") + replicaCount := *dep.Spec.Replicas + scale := &unstructured.Unstructured{} + scale.SetAPIVersion("autoscaling/v1") + scale.SetKind("Scale") + patch := client.MergeFrom(scale.DeepCopy()) + Expect(unstructured.SetNestedField(scale.Object, int64(replicaCount), "spec", "replicas")).NotTo(HaveOccurred()) + err = cl.SubResource("scale").Patch(ctx, depUnstructured, patch, client.WithSubResourceBody(scale)) Expect(err).NotTo(HaveOccurred()) + Expect(scale.GetAPIVersion()).To(Equal("autoscaling/v1")) + Expect(scale.GetKind()).To(Equal("Scale")) - By("validating patched Deployment has new annotation") - actual, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) + By("Asserting replicas got updated") + dep, err = clientset.AppsV1().Deployments(dep.Namespace).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - Expect(actual).NotTo(BeNil()) - Expect(actual.Annotations["foo"]).To(Equal("bar")) + Expect(*dep.Spec.Replicas).To(Equal(replicaCount)) + }) + + It("should be able to apply the scale subresource", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{Scheme: runtime.NewScheme()}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("Creating a deployment") + dep, err := clientset.AppsV1().Deployments(dep.Namespace).Create(ctx, dep, metav1.CreateOptions{}) + 
Expect(err).NotTo(HaveOccurred()) + dep.APIVersion = "apps/v1" + dep.Kind = "Deployment" + depUnstructured, err := toUnstructured(dep) + Expect(err).NotTo(HaveOccurred()) + + By("Updating the scale subresurce") + replicaCount := *dep.Spec.Replicas + 1 + scale := &unstructured.Unstructured{} + scale.SetAPIVersion("autoscaling/v1") + scale.SetKind("Scale") + Expect(unstructured.SetNestedField(scale.Object, int64(replicaCount), "spec", "replicas")).NotTo(HaveOccurred()) + err = cl.SubResource("scale").Apply(ctx, + client.ApplyConfigurationFromUnstructured(depUnstructured), + &client.SubResourceApplyOptions{SubResourceBody: client.ApplyConfigurationFromUnstructured(scale)}, + client.FieldOwner("foo"), + client.ForceOwnership, + ) + Expect(err).NotTo(HaveOccurred()) + Expect(scale.GetAPIVersion()).To(Equal("autoscaling/v1")) + Expect(scale.GetKind()).To(Equal("Scale")) - close(done) + By("Asserting replicas got updated") + dep, err = clientset.AppsV1().Deployments(dep.Namespace).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(*dep.Spec.Replicas).To(Equal(replicaCount)) }) + }) + + }) - It("should patch an existing object non-namespace object from a go struct", func(done Done) { + Describe("StatusClient", func() { + Context("with structured objects", func() { + It("should update status of an existing object", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Node") - node, err := clientset.CoreV1().Nodes().Create(node) + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("patching the Node") - nodeName := node.Name - err = cl.Patch(context.TODO(), node, client.ConstantPatch(types.MergePatchType, mergePatch)) + By("updating the status of Deployment") + dep.Status.Replicas = 1 + err = cl.Status().Update(ctx, dep) 
Expect(err).NotTo(HaveOccurred()) - By("validating the Node no longer exists") - actual, err := clientset.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + By("validating updated Deployment has new status") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) - Expect(actual.Annotations["foo"]).To(Equal("bar")) - - close(done) + Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) }) - It("should fail if the object does not exists", func(done Done) { + It("should update status and preserve type information", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("Patching node before it is ever created") - err = cl.Patch(context.TODO(), node, client.ConstantPatch(types.MergePatchType, mergePatch)) - Expect(err).To(HaveOccurred()) - - close(done) - }) + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) - PIt("should fail if the object doesn't have meta", func() { + By("updating the status of Deployment") + dep.SetGroupVersionKind(depGvk) + dep.Status.Replicas = 1 + err = cl.Status().Update(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + By("validating updated Deployment has type information") + Expect(dep.GroupVersionKind()).To(Equal(depGvk)) }) - It("should fail if the object cannot be mapped to a GVK", func(done Done) { - By("creating client with empty Scheme") - emptyScheme := runtime.NewScheme() - cl, err := client.New(cfg, client.Options{Scheme: emptyScheme}) + It("should patch status and preserve type information", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + dep, err := 
clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("patching the Deployment fails") - err = cl.Patch(context.TODO(), dep, client.ConstantPatch(types.MergePatchType, mergePatch)) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) - - close(done) - }) - - PIt("should fail if the GVK cannot be mapped to a Resource", func() { + By("patching the status of Deployment") + dep.SetGroupVersionKind(depGvk) + depPatch := client.MergeFrom(dep.DeepCopy()) + dep.Status.Replicas = 1 + err = cl.Status().Patch(ctx, dep, depPatch) + Expect(err).NotTo(HaveOccurred()) + By("validating updated Deployment has type information") + Expect(dep.GroupVersionKind()).To(Equal(depGvk)) }) - It("should respect passed in update options", func() { - By("creating a new client") + It("should apply status", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) + Expect(dep.Status.Replicas).To(BeEquivalentTo(0)) - By("patching the Deployment with dry-run") - err = cl.Patch(context.TODO(), dep, client.ConstantPatch(types.MergePatchType, mergePatch), client.PatchDryRunAll) + By("applying the status of Deployment") + deploymentAC, err := appsv1applyconfigurations.ExtractDeployment(dep, "foo") Expect(err).NotTo(HaveOccurred()) + deploymentAC.WithStatus(&appsv1applyconfigurations.DeploymentStatusApplyConfiguration{ + Replicas: ptr.To(int32(1)), + }) + Expect(cl.Status().Apply(ctx, deploymentAC, client.FieldOwner("foo"))).To(Succeed()) - By("validating patched Deployment doesn't have the new annotation") - actual, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) 
+ dep, err = clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - Expect(actual).NotTo(BeNil()) - Expect(actual.Annotations).NotTo(HaveKey("foo")) + Expect(dep.Status.Replicas).To(BeEquivalentTo(1)) }) - }) - Context("with unstructured objects", func() { - It("should patch an existing object from a go struct", func(done Done) { + + It("should not update spec of an existing object", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("patching the Deployment") - depName := dep.Name - u := &unstructured.Unstructured{} - Expect(scheme.Convert(dep, u, nil)).To(Succeed()) - u.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "apps", - Kind: "Deployment", - Version: "v1", - }) - err = cl.Patch(context.TODO(), u, client.ConstantPatch(types.MergePatchType, mergePatch)) + By("updating the spec and status of Deployment") + var rc int32 = 1 + dep.Status.Replicas = 1 + dep.Spec.Replicas = &rc + err = cl.Status().Update(ctx, dep) Expect(err).NotTo(HaveOccurred()) - By("validating patched Deployment has new annotation") - actual, err := clientset.AppsV1().Deployments(ns).Get(depName, metav1.GetOptions{}) + By("validating updated Deployment has new status and unchanged spec") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) - Expect(actual.Annotations["foo"]).To(Equal("bar")) - - close(done) + Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) + Expect(*actual.Spec.Replicas).To(BeEquivalentTo(replicaCount)) }) - It("should patch an existing object non-namespace object from a go struct", func(done Done) { + 
It("should update an existing object non-namespace object", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Node") - node, err := clientset.CoreV1().Nodes().Create(node) + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("patching the Node") - nodeName := node.Name - u := &unstructured.Unstructured{} - Expect(scheme.Convert(node, u, nil)).To(Succeed()) - u.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Kind: "Node", - Version: "v1", - }) - err = cl.Patch(context.TODO(), u, client.ConstantPatch(types.MergePatchType, mergePatch)) + By("updating status of the object") + node.Status.Phase = corev1.NodeRunning + err = cl.Status().Update(ctx, node) Expect(err).NotTo(HaveOccurred()) - By("validating pathed Node has new annotation") - actual, err := clientset.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + By("validate updated Node had new annotation") + actual, err := clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) - Expect(actual.Annotations["foo"]).To(Equal("bar")) - - close(done) + Expect(actual.Status.Phase).To(Equal(corev1.NodeRunning)) }) - It("should fail if the object does not exist", func(done Done) { + It("should fail if the object does not exist", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("Patching node before it is ever created") - u := &unstructured.Unstructured{} - Expect(scheme.Convert(node, u, nil)).To(Succeed()) - u.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Kind: "Node", - Version: "v1", - }) - err = cl.Patch(context.TODO(), node, client.ConstantPatch(types.MergePatchType, mergePatch)) + By("updating status of a non-existent object") + err = cl.Status().Update(ctx, dep) 
+ Expect(err).To(HaveOccurred()) + }) + + It("should fail if the object cannot be mapped to a GVK", func(ctx SpecContext) { + By("creating client with empty Scheme") + emptyScheme := runtime.NewScheme() + cl, err := client.New(cfg, client.Options{Scheme: emptyScheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating status of the Deployment") + dep.Status.Replicas = 1 + err = cl.Status().Update(ctx, dep) Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + + }) + + PIt("should fail if an API does not implement Status subresource", func() { - close(done) }) + }) - It("should respect passed-in update options", func() { - By("creating a new client") + Context("with unstructured objects", func() { + It("should update status of an existing object", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("patching the Deployment") - depName := dep.Name + By("updating the status of Deployment") u := &unstructured.Unstructured{} + dep.Status.Replicas = 1 Expect(scheme.Convert(dep, u, nil)).To(Succeed()) - u.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "apps", - Kind: "Deployment", - Version: "v1", - }) - err = cl.Patch(context.TODO(), u, client.ConstantPatch(types.MergePatchType, mergePatch), client.PatchDryRunAll) + err = cl.Status().Update(ctx, u) Expect(err).NotTo(HaveOccurred()) - By("validating patched Deployment does not have the 
new annotation") - actual, err := clientset.AppsV1().Deployments(ns).Get(depName, metav1.GetOptions{}) + By("validating updated Deployment has new status") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) - Expect(actual.Annotations).NotTo(HaveKey("foo")) + Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) }) - }) - }) - - Describe("Get", func() { - Context("with structured objects", func() { - It("should fetch an existing object for a go struct", func(done Done) { - By("first creating the Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) - Expect(err).NotTo(HaveOccurred()) + It("should update status and preserve type information", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("fetching the created Deployment") - var actual appsv1.Deployment - key := client.ObjectKey{Namespace: ns, Name: dep.Name} - err = cl.Get(context.TODO(), key, &actual) + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - Expect(actual).NotTo(BeNil()) - By("validating the fetched deployment equals the created one") - Expect(dep).To(Equal(&actual)) + By("updating the status of Deployment") + u := &unstructured.Unstructured{} + dep.Status.Replicas = 1 + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + err = cl.Status().Update(ctx, u) + Expect(err).NotTo(HaveOccurred()) - close(done) + By("validating updated Deployment has type information") + Expect(u.GroupVersionKind()).To(Equal(depGvk)) }) - It("should fetch an existing non-namespace object for a go struct", func(done Done) { - By("first creating the object") - node, err := clientset.CoreV1().Nodes().Create(node) + It("should patch status and preserve type information", func(ctx SpecContext) { + cl, err := 
client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("patching the status of Deployment") + u := &unstructured.Unstructured{} + depPatch := client.MergeFrom(dep.DeepCopy()) + dep.Status.Replicas = 1 + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + err = cl.Status().Patch(ctx, u, depPatch) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has type information") + Expect(u.GroupVersionKind()).To(Equal(depGvk)) + + By("validating patched Deployment has new status") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) + }) + It("should apply status and preserve type information", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("retrieving node through client") - var actual corev1.Node - key := client.ObjectKey{Namespace: ns, Name: node.Name} - err = cl.Get(context.TODO(), key, &actual) + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - Expect(actual).NotTo(BeNil()) + Expect(dep.Status.Replicas).To(BeEquivalentTo(0)) - Expect(node).To(Equal(&actual)) + By("applying the status of Deployment") + dep.Status.Replicas = 1 + dep.ManagedFields = nil // Must be unset in SSA requests + u := &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + err = cl.Status().Apply(ctx, client.ApplyConfigurationFromUnstructured(u), client.FieldOwner("foo")) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has type information") + 
Expect(u.GroupVersionKind()).To(Equal(depGvk)) - close(done) + By("validating patched Deployment has new status") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) }) - It("should fail if the object does not exists", func(done Done) { + It("should not update spec of an existing object", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("fetching object that has not been created yet") - key := client.ObjectKey{Namespace: ns, Name: dep.Name} - var actual appsv1.Deployment - err = cl.Get(context.TODO(), key, &actual) - Expect(err).To(HaveOccurred()) + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the spec and status of Deployment") + u := &unstructured.Unstructured{} + var rc int32 = 1 + dep.Status.Replicas = 1 + dep.Spec.Replicas = &rc + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + err = cl.Status().Update(ctx, u) + Expect(err).NotTo(HaveOccurred()) - close(done) + By("validating updated Deployment has new status and unchanged spec") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Status.Replicas).To(BeEquivalentTo(1)) + Expect(*actual.Spec.Replicas).To(BeEquivalentTo(replicaCount)) }) - PIt("should fail if the object doesn't have meta", func() { + It("should update an existing object non-namespace object", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) - }) + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + 
Expect(err).NotTo(HaveOccurred()) - It("should fail if the object cannot be mapped to a GVK", func() { - By("first creating the Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + By("updating status of the object") + u := &unstructured.Unstructured{} + node.Status.Phase = corev1.NodeRunning + Expect(scheme.Convert(node, u, nil)).To(Succeed()) + err = cl.Status().Update(ctx, u) Expect(err).NotTo(HaveOccurred()) - By("creating a client with an empty Scheme") - emptyScheme := runtime.NewScheme() - cl, err := client.New(cfg, client.Options{Scheme: emptyScheme}) + By("validate updated Node had new annotation") + actual, err := clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Status.Phase).To(Equal(corev1.NodeRunning)) + }) + + It("should fail if the object does not exist", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("fetching the created Deployment fails") - var actual appsv1.Deployment - key := client.ObjectKey{Namespace: ns, Name: dep.Name} - err = cl.Get(context.TODO(), key, &actual) + By("updating status of a non-existent object") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + err = cl.Status().Update(ctx, u) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) }) PIt("should fail if the GVK cannot be mapped to a Resource", func() { }) + + PIt("should fail if an API does not implement Status subresource", func() { + + }) + }) - Context("with unstructured objects", func() { - It("should fetch an existing object", func(done Done) { - By("first creating the Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + Context("with metadata objects", func() { + It("should fail to update with an error", func(ctx SpecContext) { + cl, err := 
client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) + obj := metaOnlyFromObj(dep, scheme) + Expect(cl.Status().Update(ctx, obj)).NotTo(Succeed()) + }) + + It("should patch status and preserve type information", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("encoding the Deployment as unstructured") - var u runtime.Unstructured = &unstructured.Unstructured{} - Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) - By("fetching the created Deployment") - var actual unstructured.Unstructured - actual.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "apps", - Kind: "Deployment", - Version: "v1", - }) - key := client.ObjectKey{Namespace: ns, Name: dep.Name} - err = cl.Get(context.TODO(), key, &actual) + By("patching the status of Deployment") + objPatch := client.MergeFrom(metaOnlyFromObj(dep, scheme)) + dep.Annotations = map[string]string{"some-new-annotation": "some-new-value"} + obj := metaOnlyFromObj(dep, scheme) + err = cl.Status().Patch(ctx, obj, objPatch) Expect(err).NotTo(HaveOccurred()) - Expect(actual).NotTo(BeNil()) - By("validating the fetched Deployment equals the created one") - Expect(u).To(Equal(&actual)) + By("validating updated Deployment has type information") + Expect(obj.GroupVersionKind()).To(Equal(depGvk)) - close(done) + By("validating patched Deployment has new status") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Annotations).To(HaveKeyWithValue("some-new-annotation", "some-new-value")) }) + }) + }) - It("should fetch an existing non-namespace object", func(done Done) { - By("first creating the Node") - node, err := 
clientset.CoreV1().Nodes().Create(node) + Describe("Delete", func() { + Context("with structured objects", func() { + It("should delete an existing object from a go struct", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) - By("encoding the Node as unstructured") - var u runtime.Unstructured = &unstructured.Unstructured{} - Expect(scheme.Convert(node, u, nil)).To(Succeed()) + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("deleting the Deployment") + depName := dep.Name + err = cl.Delete(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, depName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + It("should delete an existing object non-namespace object from a go struct", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("fetching the created Node") - var actual unstructured.Unstructured - actual.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "", - Kind: "Node", - Version: "v1", - }) - key := client.ObjectKey{Namespace: ns, Name: node.Name} - err = cl.Get(context.TODO(), key, &actual) + By("initially creating a Node") + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - Expect(actual).NotTo(BeNil()) - By("validating the fetched Node equals the created one") - Expect(u).To(Equal(&actual)) + By("deleting the Node") + nodeName := node.Name + err = cl.Delete(ctx, node) + Expect(err).NotTo(HaveOccurred()) - close(done) + By("validating the Node no longer exists") + _, err = clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) }) - It("should fail if the 
object does not exists", func(done Done) { + It("should fail if the object does not exist", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("fetching object that has not been created yet") - key := client.ObjectKey{Namespace: ns, Name: dep.Name} - u := &unstructured.Unstructured{} - err = cl.Get(context.TODO(), key, u) + By("Deleting node before it is ever created") + err = cl.Delete(ctx, node) Expect(err).To(HaveOccurred()) + }) + + PIt("should fail if the object doesn't have meta", func() { - close(done) }) - }) - }) - Describe("List", func() { - Context("with structured objects", func() { - It("should fetch collection of objects", func(done Done) { - By("creating an initial object") - dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + It("should fail if the object cannot be mapped to a GVK", func(ctx SpecContext) { + By("creating client with empty Scheme") + emptyScheme := runtime.NewScheme() + cl, err := client.New(cfg, client.Options{Scheme: emptyScheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) + By("deleting the Deployment fails") + err = cl.Delete(ctx, dep) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + + }) + + It("should delete a collection of objects", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) - By("listing all objects of that type in the cluster") - deps := &appsv1.DeploymentList{} - Expect(cl.List(context.Background(), deps)).NotTo(HaveOccurred()) + By("initially creating two Deployments") - Expect(deps.Items).NotTo(BeEmpty()) - hasDep := 
false - for _, item := range deps.Items { - if item.Name == dep.Name && item.Namespace == dep.Namespace { - hasDep = true - break - } - } - Expect(hasDep).To(BeTrue()) + dep2 := dep.DeepCopy() + dep2.Name += "-2" + + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + dep2, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep2, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) - close(done) - }, serverSideTimeoutSeconds) + depName := dep.Name + dep2Name := dep2.Name - It("should fetch unstructured collection of objects", func(done Done) { - By("create an initial object") - _, err := clientset.AppsV1().Deployments(ns).Create(dep) + By("deleting Deployments") + err = cl.DeleteAllOf(ctx, dep, client.InNamespace(ns), client.MatchingLabels(dep.ObjectMeta.Labels)) Expect(err).NotTo(HaveOccurred()) + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, depName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, dep2Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + }) + Context("with unstructured objects", func() { + It("should delete an existing object from a go struct", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) - By("listing all objects of that type in the cluster") - deps := &unstructured.UnstructuredList{} - deps.SetGroupVersionKind(schema.GroupVersionKind{ + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("deleting the Deployment") + depName := dep.Name + u := &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ Group: "apps", - Kind: "DeploymentList", + Kind: "Deployment", 
Version: "v1", }) - err = cl.List(context.Background(), deps) + err = cl.Delete(ctx, u) Expect(err).NotTo(HaveOccurred()) - Expect(deps.Items).NotTo(BeEmpty()) - hasDep := false - for _, item := range deps.Items { - if item.GetName() == dep.Name && item.GetNamespace() == dep.Namespace { - hasDep = true - break - } - } - Expect(hasDep).To(BeTrue()) - close(done) - }, serverSideTimeoutSeconds) + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, depName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) - It("should return an empty list if there are no matching objects", func(done Done) { + It("should delete an existing object non-namespace object from a go struct", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) - By("listing all Deployments in the cluster") - deps := &appsv1.DeploymentList{} - Expect(cl.List(context.Background(), deps)).NotTo(HaveOccurred()) + By("initially creating a Node") + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) - By("validating no Deployments are returned") - Expect(deps.Items).To(BeEmpty()) + By("deleting the Node") + nodeName := node.Name + u := &unstructured.Unstructured{} + Expect(scheme.Convert(node, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Kind: "Node", + Version: "v1", + }) + err = cl.Delete(ctx, u) + Expect(err).NotTo(HaveOccurred()) - close(done) - }, serverSideTimeoutSeconds) + By("validating the Node no longer exists") + _, err = clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) - // TODO(seans): get label selector test working - It("should filter results by label selector", func(done Done) { - By("creating a Deployment with the app=frontend label") - depFrontend := &appsv1.Deployment{ - ObjectMeta: 
metav1.ObjectMeta{ + It("should fail if the object does not exist", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("Deleting node before it is ever created") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(node, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Kind: "Node", + Version: "v1", + }) + err = cl.Delete(ctx, node) + Expect(err).To(HaveOccurred()) + }) + + It("should delete a collection of object", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating two Deployments") + + dep2 := dep.DeepCopy() + dep2.Name += "-2" + + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + dep2, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep2, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + depName := dep.Name + dep2Name := dep2.Name + + By("deleting Deployments") + u := &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + err = cl.DeleteAllOf(ctx, u, client.InNamespace(ns), client.MatchingLabels(dep.ObjectMeta.Labels)) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, depName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, dep2Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + }) + Context("with metadata objects", func() { + It("should delete an existing object from a go struct", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + 
By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("deleting the Deployment") + metaObj := metaOnlyFromObj(dep, scheme) + err = cl.Delete(ctx, metaObj) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + + It("should delete an existing object non-namespace object from a go struct", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Node") + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("deleting the Node") + metaObj := metaOnlyFromObj(node, scheme) + err = cl.Delete(ctx, metaObj) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Node no longer exists") + _, err = clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + + It("should fail if the object does not exist", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("Deleting node before it is ever created") + metaObj := metaOnlyFromObj(node, scheme) + err = cl.Delete(ctx, metaObj) + Expect(err).To(HaveOccurred()) + }) + + It("should delete a collection of object", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating two Deployments") + + dep2 := dep.DeepCopy() + dep2.Name += "-2" + + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + dep2, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep2, 
metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + depName := dep.Name + dep2Name := dep2.Name + + By("deleting Deployments") + metaObj := metaOnlyFromObj(dep, scheme) + err = cl.DeleteAllOf(ctx, metaObj, client.InNamespace(ns), client.MatchingLabels(dep.ObjectMeta.Labels)) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, depName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, dep2Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + }) + }) + + Describe("Get", func() { + Context("with structured objects", func() { + It("should fetch an existing object for a go struct", func(ctx SpecContext) { + By("first creating the Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching the created Deployment") + var actual appsv1.Deployment + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + err = cl.Get(ctx, key, &actual) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + By("validating the fetched deployment equals the created one") + Expect(dep).To(Equal(&actual)) + }) + + It("should fetch an existing non-namespace object for a go struct", func(ctx SpecContext) { + By("first creating the object") + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("retrieving node through client") + var actual corev1.Node + key := client.ObjectKey{Namespace: ns, Name: node.Name} + err = cl.Get(ctx, key, &actual) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + 
Expect(node).To(Equal(&actual)) + }) + + It("should fail if the object does not exist", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching object that has not been created yet") + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + var actual appsv1.Deployment + err = cl.Get(ctx, key, &actual) + Expect(err).To(HaveOccurred()) + }) + + PIt("should fail if the object doesn't have meta", func() { + + }) + + It("should fail if the object cannot be mapped to a GVK", func(ctx SpecContext) { + By("first creating the Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a client with an empty Scheme") + emptyScheme := runtime.NewScheme() + cl, err := client.New(cfg, client.Options{Scheme: emptyScheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching the created Deployment fails") + var actual appsv1.Deployment + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + err = cl.Get(ctx, key, &actual) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + + }) + + // Test this with an integrated type and a CRD to make sure it covers both proto + // and json deserialization. 
+ for idx, object := range []client.Object{&corev1.ConfigMap{}, &pkg.ChaosPod{}} { + It(fmt.Sprintf("should not retain any data in the obj variable that is not on the server for %T", object), func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + object.SetName(fmt.Sprintf("retain-test-%d", idx)) + object.SetNamespace(ns) + + By("First creating the object") + toCreate := object.DeepCopyObject().(client.Object) + Expect(cl.Create(ctx, toCreate)).NotTo(HaveOccurred()) + + By("Fetching it into a variable that has finalizers set") + toGetInto := object.DeepCopyObject().(client.Object) + toGetInto.SetFinalizers([]string{"some-finalizer"}) + Expect(cl.Get(ctx, client.ObjectKeyFromObject(object), toGetInto)).NotTo(HaveOccurred()) + + By("Ensuring the created and the received object are equal") + Expect(toCreate).Should(Equal(toGetInto)) + }) + } + + }) + + Context("with unstructured objects", func() { + It("should fetch an existing object", func(ctx SpecContext) { + By("first creating the Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("encoding the Deployment as unstructured") + var u runtime.Unstructured = &unstructured.Unstructured{} + Expect(scheme.Convert(dep, u, nil)).To(Succeed()) + + By("fetching the created Deployment") + var actual unstructured.Unstructured + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + err = cl.Get(ctx, key, &actual) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + By("validating the fetched Deployment equals the created one") + unstructured.RemoveNestedField(actual.Object, "spec", "template", "metadata", 
"creationTimestamp") + Expect(u).To(BeComparableTo(&actual)) + }) + + It("should fetch an existing non-namespace object", func(ctx SpecContext) { + By("first creating the Node") + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("encoding the Node as unstructured") + var u runtime.Unstructured = &unstructured.Unstructured{} + Expect(scheme.Convert(node, u, nil)).To(Succeed()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching the created Node") + var actual unstructured.Unstructured + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Kind: "Node", + Version: "v1", + }) + key := client.ObjectKey{Namespace: ns, Name: node.Name} + err = cl.Get(ctx, key, &actual) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + By("validating the fetched Node equals the created one") + Expect(u).To(Equal(&actual)) + }) + + It("should fail if the object does not exist", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching object that has not been created yet") + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + u := &unstructured.Unstructured{} + err = cl.Get(ctx, key, u) + Expect(err).To(HaveOccurred()) + }) + + It("should not retain any data in the obj variable that is not on the server", func(ctx SpecContext) { + object := &unstructured.Unstructured{} + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + object.SetName("retain-unstructured") + object.SetNamespace(ns) + object.SetAPIVersion("chaosapps.metamagical.io/v1") + object.SetKind("ChaosPod") + + By("First creating the object") + toCreate := object.DeepCopyObject().(client.Object) + Expect(cl.Create(ctx, toCreate)).NotTo(HaveOccurred()) + + By("Fetching it into a variable 
that has finalizers set") + toGetInto := object.DeepCopyObject().(client.Object) + toGetInto.SetFinalizers([]string{"some-finalizer"}) + Expect(cl.Get(ctx, client.ObjectKeyFromObject(object), toGetInto)).NotTo(HaveOccurred()) + + By("Ensuring the created and the received object are equal") + Expect(toCreate).Should(Equal(toGetInto)) + }) + }) + Context("with metadata objects", func() { + It("should fetch an existing object for a go struct", func(ctx SpecContext) { + By("first creating the Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching the created Deployment") + var actual metav1.PartialObjectMetadata + gvk := schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + } + actual.SetGroupVersionKind(gvk) + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + err = cl.Get(ctx, key, &actual) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + By("validating that the GVK has been preserved") + Expect(actual.GroupVersionKind()).To(Equal(gvk)) + + By("validating the fetched deployment equals the created one") + Expect(metaOnlyFromObj(dep, scheme)).To(Equal(&actual)) + }) + + It("should fetch an existing non-namespace object for a go struct", func(ctx SpecContext) { + By("first creating the object") + node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("retrieving node through client") + var actual metav1.PartialObjectMetadata + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Version: "v1", + Kind: "Node", + }) + key := client.ObjectKey{Namespace: ns, Name: node.Name} + err = cl.Get(ctx, key, &actual) + 
Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + Expect(metaOnlyFromObj(node, scheme)).To(Equal(&actual)) + }) + + It("should fail if the object does not exist", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching object that has not been created yet") + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + var actual metav1.PartialObjectMetadata + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + }) + err = cl.Get(ctx, key, &actual) + Expect(err).To(HaveOccurred()) + }) + + PIt("should fail if the object doesn't have meta", func() { + + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + + }) + + It("should not retain any data in the obj variable that is not on the server", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("First creating the object") + toCreate := &pkg.ChaosPod{ObjectMeta: metav1.ObjectMeta{Name: "retain-metadata", Namespace: ns}} + Expect(cl.Create(ctx, toCreate)).NotTo(HaveOccurred()) + + By("Fetching it into a variable that has finalizers set") + toGetInto := &metav1.PartialObjectMetadata{ + TypeMeta: metav1.TypeMeta{APIVersion: "chaosapps.metamagical.io/v1", Kind: "ChaosPod"}, + ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: "retain-metadata"}, + } + toGetInto.SetFinalizers([]string{"some-finalizer"}) + Expect(cl.Get(ctx, client.ObjectKeyFromObject(toGetInto), toGetInto)).NotTo(HaveOccurred()) + + By("Ensuring the created and the received objects metadata are equal") + Expect(toCreate.ObjectMeta).Should(Equal(toGetInto.ObjectMeta)) + }) + }) + }) + + Describe("List", func() { + Context("with structured objects", func() { + It("should fetch collection of objects", func(ctx SpecContext) { + By("creating an initial object") + dep, err := 
clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all objects of that type in the cluster") + deps := &appsv1.DeploymentList{} + Expect(cl.List(ctx, deps)).NotTo(HaveOccurred()) + + Expect(deps.Items).NotTo(BeEmpty()) + hasDep := false + for _, item := range deps.Items { + if item.Name == dep.Name && item.Namespace == dep.Namespace { + hasDep = true + break + } + } + Expect(hasDep).To(BeTrue()) + }) + + It("should fetch unstructured collection of objects", func(ctx SpecContext) { + By("create an initial object") + _, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all objects of that type in the cluster") + deps := &unstructured.UnstructuredList{} + deps.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + err = cl.List(ctx, deps) + Expect(err).NotTo(HaveOccurred()) + + Expect(deps.Items).NotTo(BeEmpty()) + hasDep := false + for _, item := range deps.Items { + Expect(item.GroupVersionKind()).To(Equal(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + })) + if item.GetName() == dep.Name && item.GetNamespace() == dep.Namespace { + hasDep = true + break + } + } + Expect(hasDep).To(BeTrue()) + }) + + It("should fetch unstructured collection of objects, even if scheme is empty", func(ctx SpecContext) { + By("create an initial object") + _, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{Scheme: runtime.NewScheme()}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all objects of that type in the cluster") + deps := 
&unstructured.UnstructuredList{} + deps.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + err = cl.List(ctx, deps) + Expect(err).NotTo(HaveOccurred()) + + Expect(deps.Items).NotTo(BeEmpty()) + hasDep := false + for _, item := range deps.Items { + if item.GetName() == dep.Name && item.GetNamespace() == dep.Namespace { + hasDep = true + break + } + } + Expect(hasDep).To(BeTrue()) + }) + + It("should return an empty list if there are no matching objects", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in the cluster") + deps := &appsv1.DeploymentList{} + Expect(cl.List(ctx, deps)).NotTo(HaveOccurred()) + + By("validating no Deployments are returned") + Expect(deps.Items).To(BeEmpty()) + }) + + // TODO(seans): get label selector test working + It("should filter results by label selector", func(ctx SpecContext) { + By("creating a Deployment with the app=frontend label") + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ Name: "deployment-frontend", Namespace: ns, Labels: map[string]string{"app": "frontend"}, @@ -1451,7 +2530,7 @@ var _ = Describe("Client", func() { }, }, } - depFrontend, err := clientset.AppsV1().Deployments(ns).Create(depFrontend) + depFrontend, err := clientset.AppsV1().Deployments(ns).Create(ctx, depFrontend, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("creating a Deployment with the app=backend label") @@ -1471,7 +2550,7 @@ var _ = Describe("Client", func() { }, }, } - depBackend, err = clientset.AppsV1().Deployments(ns).Create(depBackend) + depBackend, err = clientset.AppsV1().Deployments(ns).Create(ctx, depBackend, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) cl, err := client.New(cfg, client.Options{}) @@ -1480,28 +2559,373 @@ var _ = Describe("Client", func() { By("listing all Deployments with label app=backend") deps := &appsv1.DeploymentList{} 
labels := map[string]string{"app": "backend"} - err = cl.List(context.Background(), deps, client.MatchingLabels(labels)) + err = cl.List(ctx, deps, client.MatchingLabels(labels)) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment with the backend label is returned") + Expect(deps.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(deps.Items))) + actual := deps.Items[0] + Expect(actual.Name).To(Equal("deployment-backend")) + + deleteDeployment(ctx, depFrontend, ns) + deleteDeployment(ctx, depBackend, ns) + }) + + It("should filter results by namespace selector", func(ctx SpecContext) { + By("creating a Deployment in test-namespace-1") + tns1 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-1"}} + _, err := clientset.CoreV1().Namespaces().Create(ctx, tns1, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: "test-namespace-1"}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend, err = clientset.AppsV1().Deployments("test-namespace-1").Create(ctx, depFrontend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-2") + tns2 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-2"}} + _, err = clientset.CoreV1().Namespaces().Create(ctx, tns2, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depBackend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: "test-namespace-2"}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + 
Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend, err = clientset.AppsV1().Deployments("test-namespace-2").Create(ctx, depBackend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in test-namespace-1") + deps := &appsv1.DeploymentList{} + err = cl.List(ctx, deps, client.InNamespace("test-namespace-1")) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment in test-namespace-1 is returned") + Expect(deps.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(deps.Items))) + actual := deps.Items[0] + Expect(actual.Name).To(Equal("deployment-frontend")) + + deleteDeployment(ctx, depFrontend, "test-namespace-1") + deleteDeployment(ctx, depBackend, "test-namespace-2") + deleteNamespace(ctx, tns1) + deleteNamespace(ctx, tns2) + }) + + It("should filter results by field selector", func(ctx SpecContext) { + By("creating a Deployment with name deployment-frontend") + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: ns}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend, err := clientset.AppsV1().Deployments(ns).Create(ctx, depFrontend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment with name deployment-backend") + depBackend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: ns}, + Spec: appsv1.DeploymentSpec{ + Selector: 
&metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend, err = clientset.AppsV1().Deployments(ns).Create(ctx, depBackend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments with field metadata.name=deployment-backend") + deps := &appsv1.DeploymentList{} + err = cl.List(ctx, deps, + client.MatchingFields{"metadata.name": "deployment-backend"}) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment with the backend field is returned") + Expect(deps.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(deps.Items))) + actual := deps.Items[0] + Expect(actual.Name).To(Equal("deployment-backend")) + + deleteDeployment(ctx, depFrontend, ns) + deleteDeployment(ctx, depBackend, ns) + }) + + It("should filter results by namespace selector and label selector", func(ctx SpecContext) { + By("creating a Deployment in test-namespace-3 with the app=frontend label") + tns3 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-3"}} + _, err := clientset.CoreV1().Namespaces().Create(ctx, tns3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depFrontend3 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-frontend", + Namespace: "test-namespace-3", + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend3, 
err = clientset.AppsV1().Deployments("test-namespace-3").Create(ctx, depFrontend3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-3 with the app=backend label") + depBackend3 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-backend", + Namespace: "test-namespace-3", + Labels: map[string]string{"app": "backend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend3, err = clientset.AppsV1().Deployments("test-namespace-3").Create(ctx, depBackend3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-4 with the app=frontend label") + tns4 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-4"}} + _, err = clientset.CoreV1().Namespaces().Create(ctx, tns4, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + depFrontend4 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-frontend", + Namespace: "test-namespace-4", + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend4, err = clientset.AppsV1().Deployments("test-namespace-4").Create(ctx, depFrontend4, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in 
test-namespace-3 with label app=frontend") + deps := &appsv1.DeploymentList{} + labels := map[string]string{"app": "frontend"} + err = cl.List(ctx, deps, + client.InNamespace("test-namespace-3"), + client.MatchingLabels(labels), + ) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment in test-namespace-3 with label app=frontend is returned") + Expect(deps.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(deps.Items))) + actual := deps.Items[0] + Expect(actual.Name).To(Equal("deployment-frontend")) + Expect(actual.Namespace).To(Equal("test-namespace-3")) + + deleteDeployment(ctx, depFrontend3, "test-namespace-3") + deleteDeployment(ctx, depBackend3, "test-namespace-3") + deleteDeployment(ctx, depFrontend4, "test-namespace-4") + deleteNamespace(ctx, tns3) + deleteNamespace(ctx, tns4) + }) + + It("should filter results using limit and continue options", func(ctx SpecContext) { + + makeDeployment := func(suffix string) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("deployment-%s", suffix), + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + } + + By("creating 4 deployments") + dep1 := makeDeployment("1") + dep1, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep1, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep1, ns) + + dep2 := makeDeployment("2") + dep2, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep2, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep2, ns) + + dep3 := makeDeployment("3") + dep3, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + 
defer deleteDeployment(ctx, dep3, ns) + + dep4 := makeDeployment("4") + dep4, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep4, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep4, ns) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing 1 deployment when limit=1 is used") + deps := &appsv1.DeploymentList{} + err = cl.List(ctx, deps, + client.Limit(1), + ) + Expect(err).NotTo(HaveOccurred()) + + Expect(deps.Items).To(HaveLen(1)) + Expect(deps.Continue).NotTo(BeEmpty()) + Expect(deps.Items[0].Name).To(Equal(dep1.Name)) + + continueToken := deps.Continue + + By("listing the next deployment when previous continuation token is used and limit=1") + deps = &appsv1.DeploymentList{} + err = cl.List(ctx, deps, + client.Limit(1), + client.Continue(continueToken), + ) + Expect(err).NotTo(HaveOccurred()) + + Expect(deps.Items).To(HaveLen(1)) + Expect(deps.Continue).NotTo(BeEmpty()) + Expect(deps.Items[0].Name).To(Equal(dep2.Name)) + + continueToken = deps.Continue + + By("listing the 2 remaining deployments when previous continuation token is used without a limit") + deps = &appsv1.DeploymentList{} + err = cl.List(ctx, deps, + client.Continue(continueToken), + ) + Expect(err).NotTo(HaveOccurred()) + + Expect(deps.Items).To(HaveLen(2)) + Expect(deps.Continue).To(BeEmpty()) + Expect(deps.Items[0].Name).To(Equal(dep3.Name)) + Expect(deps.Items[1].Name).To(Equal(dep4.Name)) + }) + + PIt("should fail if the object doesn't have meta", func() { + + }) + + PIt("should fail if the object cannot be mapped to a GVK", func() { + + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + + }) + }) + + Context("with unstructured objects", func() { + It("should fetch collection of objects", func(ctx SpecContext) { + By("create an initial object") + _, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + 
cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all objects of that type in the cluster") + deps := &unstructured.UnstructuredList{} + deps.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + err = cl.List(ctx, deps) + Expect(err).NotTo(HaveOccurred()) + + Expect(deps.Items).NotTo(BeEmpty()) + hasDep := false + for _, item := range deps.Items { + if item.GetName() == dep.Name && item.GetNamespace() == dep.Namespace { + hasDep = true + break + } + } + Expect(hasDep).To(BeTrue()) + }) - By("only the Deployment with the backend label is returned") - Expect(deps.Items).NotTo(BeEmpty()) - Expect(1).To(Equal(len(deps.Items))) - actual := deps.Items[0] - Expect(actual.Name).To(Equal("deployment-backend")) + It("should return an empty list if there are no matching objects", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) - deleteDeployment(depFrontend, ns) - deleteDeployment(depBackend, ns) + By("listing all Deployments in the cluster") + deps := &unstructured.UnstructuredList{} + deps.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + Expect(cl.List(ctx, deps)).NotTo(HaveOccurred()) - close(done) - }, serverSideTimeoutSeconds) + By("validating no Deployments are returned") + Expect(deps.Items).To(BeEmpty()) + }) - It("should filter results by namespace selector", func(done Done) { - By("creating a Deployment in test-namespace-1") - tns1 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-1"}} - _, err := clientset.CoreV1().Namespaces().Create(tns1) + It("should filter results by namespace selector", func(ctx SpecContext) { + By("creating a Deployment in test-namespace-5") + tns1 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-5"}} + _, err := clientset.CoreV1().Namespaces().Create(ctx, tns1, 
metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) depFrontend := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: "test-namespace-1"}, + ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: "test-namespace-5"}, Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": "frontend"}, @@ -1512,15 +2936,15 @@ var _ = Describe("Client", func() { }, }, } - depFrontend, err = clientset.AppsV1().Deployments("test-namespace-1").Create(depFrontend) + depFrontend, err = clientset.AppsV1().Deployments("test-namespace-5").Create(ctx, depFrontend, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("creating a Deployment in test-namespace-2") - tns2 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-2"}} - _, err = clientset.CoreV1().Namespaces().Create(tns2) + By("creating a Deployment in test-namespace-6") + tns2 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-6"}} + _, err = clientset.CoreV1().Namespaces().Create(ctx, tns2, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) depBackend := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: "test-namespace-2"}, + ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: "test-namespace-6"}, Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": "backend"}, @@ -1531,32 +2955,35 @@ var _ = Describe("Client", func() { }, }, } - depBackend, err = clientset.AppsV1().Deployments("test-namespace-2").Create(depBackend) + depBackend, err = clientset.AppsV1().Deployments("test-namespace-6").Create(ctx, depBackend, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) - By("listing all Deployments in test-namespace-1") - deps := &appsv1.DeploymentList{} - err = cl.List(context.Background(), 
deps, client.InNamespace("test-namespace-1")) + By("listing all Deployments in test-namespace-5") + deps := &unstructured.UnstructuredList{} + deps.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + err = cl.List(ctx, deps, client.InNamespace("test-namespace-5")) Expect(err).NotTo(HaveOccurred()) - By("only the Deployment in test-namespace-1 is returned") + By("only the Deployment in test-namespace-5 is returned") Expect(deps.Items).NotTo(BeEmpty()) Expect(1).To(Equal(len(deps.Items))) actual := deps.Items[0] - Expect(actual.Name).To(Equal("deployment-frontend")) - - deleteDeployment(depFrontend, "test-namespace-1") - deleteDeployment(depBackend, "test-namespace-2") - deleteNamespace(tns1) - deleteNamespace(tns2) + Expect(actual.GetName()).To(Equal("deployment-frontend")) - close(done) - }, serverSideTimeoutSeconds) + deleteDeployment(ctx, depFrontend, "test-namespace-5") + deleteDeployment(ctx, depBackend, "test-namespace-6") + deleteNamespace(ctx, tns1) + deleteNamespace(ctx, tns2) + }) - It("should filter results by field selector", func(done Done) { + It("should filter results by field selector", func(ctx SpecContext) { By("creating a Deployment with name deployment-frontend") depFrontend := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: ns}, @@ -1570,7 +2997,7 @@ var _ = Describe("Client", func() { }, }, } - depFrontend, err := clientset.AppsV1().Deployments(ns).Create(depFrontend) + depFrontend, err := clientset.AppsV1().Deployments(ns).Create(ctx, depFrontend, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("creating a Deployment with name deployment-backend") @@ -1586,39 +3013,42 @@ var _ = Describe("Client", func() { }, }, } - depBackend, err = clientset.AppsV1().Deployments(ns).Create(depBackend) + depBackend, err = clientset.AppsV1().Deployments(ns).Create(ctx, depBackend, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) cl, err 
:= client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) By("listing all Deployments with field metadata.name=deployment-backend") - deps := &appsv1.DeploymentList{} - err = cl.List(context.Background(), deps, - client.MatchingField("metadata.name", "deployment-backend")) + deps := &unstructured.UnstructuredList{} + deps.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + err = cl.List(ctx, deps, + client.MatchingFields{"metadata.name": "deployment-backend"}) Expect(err).NotTo(HaveOccurred()) By("only the Deployment with the backend field is returned") Expect(deps.Items).NotTo(BeEmpty()) Expect(1).To(Equal(len(deps.Items))) actual := deps.Items[0] - Expect(actual.Name).To(Equal("deployment-backend")) - - deleteDeployment(depFrontend, ns) - deleteDeployment(depBackend, ns) + Expect(actual.GetName()).To(Equal("deployment-backend")) - close(done) - }, serverSideTimeoutSeconds) + deleteDeployment(ctx, depFrontend, ns) + deleteDeployment(ctx, depBackend, ns) + }) - It("should filter results by namespace selector and label selector", func(done Done) { - By("creating a Deployment in test-namespace-3 with the app=frontend label") - tns3 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-3"}} - _, err := clientset.CoreV1().Namespaces().Create(tns3) + It("should filter results by namespace selector and label selector", func(ctx SpecContext) { + By("creating a Deployment in test-namespace-7 with the app=frontend label") + tns3 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-7"}} + _, err := clientset.CoreV1().Namespaces().Create(ctx, tns3, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) depFrontend3 := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "deployment-frontend", - Namespace: "test-namespace-3", + Namespace: "test-namespace-7", Labels: map[string]string{"app": "frontend"}, }, Spec: appsv1.DeploymentSpec{ @@ -1631,14 +3061,14 @@ 
var _ = Describe("Client", func() { }, }, } - depFrontend3, err = clientset.AppsV1().Deployments("test-namespace-3").Create(depFrontend3) + depFrontend3, err = clientset.AppsV1().Deployments("test-namespace-7").Create(ctx, depFrontend3, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("creating a Deployment in test-namespace-3 with the app=backend label") + By("creating a Deployment in test-namespace-7 with the app=backend label") depBackend3 := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "deployment-backend", - Namespace: "test-namespace-3", + Namespace: "test-namespace-7", Labels: map[string]string{"app": "backend"}, }, Spec: appsv1.DeploymentSpec{ @@ -1651,17 +3081,17 @@ var _ = Describe("Client", func() { }, }, } - depBackend3, err = clientset.AppsV1().Deployments("test-namespace-3").Create(depBackend3) + depBackend3, err = clientset.AppsV1().Deployments("test-namespace-7").Create(ctx, depBackend3, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("creating a Deployment in test-namespace-4 with the app=frontend label") - tns4 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-4"}} - _, err = clientset.CoreV1().Namespaces().Create(tns4) + By("creating a Deployment in test-namespace-8 with the app=frontend label") + tns4 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-8"}} + _, err = clientset.CoreV1().Namespaces().Create(ctx, tns4, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) depFrontend4 := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "deployment-frontend", - Namespace: "test-namespace-4", + Namespace: "test-namespace-8", Labels: map[string]string{"app": "frontend"}, }, Spec: appsv1.DeploymentSpec{ @@ -1674,107 +3104,176 @@ var _ = Describe("Client", func() { }, }, } - depFrontend4, err = clientset.AppsV1().Deployments("test-namespace-4").Create(depFrontend4) + depFrontend4, err = clientset.AppsV1().Deployments("test-namespace-8").Create(ctx, 
depFrontend4, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) - By("listing all Deployments in test-namespace-3 with label app=frontend") - deps := &appsv1.DeploymentList{} + By("listing all Deployments in test-namespace-8 with label app=frontend") + deps := &unstructured.UnstructuredList{} + deps.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) labels := map[string]string{"app": "frontend"} - err = cl.List(context.Background(), deps, - client.InNamespace("test-namespace-3"), - client.MatchingLabels(labels), - ) + err = cl.List(ctx, deps, + client.InNamespace("test-namespace-7"), client.MatchingLabels(labels)) Expect(err).NotTo(HaveOccurred()) - By("only the Deployment in test-namespace-3 with label app=frontend is returned") + By("only the Deployment in test-namespace-7 with label app=frontend is returned") Expect(deps.Items).NotTo(BeEmpty()) Expect(1).To(Equal(len(deps.Items))) actual := deps.Items[0] - Expect(actual.Name).To(Equal("deployment-frontend")) - Expect(actual.Namespace).To(Equal("test-namespace-3")) - - deleteDeployment(depFrontend3, "test-namespace-3") - deleteDeployment(depBackend3, "test-namespace-3") - deleteDeployment(depFrontend4, "test-namespace-4") - deleteNamespace(tns3) - deleteNamespace(tns4) - - close(done) - }, serverSideTimeoutSeconds) - - PIt("should fail if the object doesn't have meta", func() { + Expect(actual.GetName()).To(Equal("deployment-frontend")) + Expect(actual.GetNamespace()).To(Equal("test-namespace-7")) + deleteDeployment(ctx, depFrontend3, "test-namespace-7") + deleteDeployment(ctx, depBackend3, "test-namespace-7") + deleteDeployment(ctx, depFrontend4, "test-namespace-8") + deleteNamespace(ctx, tns3) + deleteNamespace(ctx, tns4) }) - PIt("should fail if the object cannot be mapped to a GVK", func() { + PIt("should fail if the object doesn't have meta", func() { }) - 
PIt("should fail if the GVK cannot be mapped to a Resource", func() { + PIt("should filter results by namespace selector", func() { }) }) - - Context("with unstructured objects", func() { - It("should fetch collection of objects", func(done Done) { - By("create an initial object") - _, err := clientset.AppsV1().Deployments(ns).Create(dep) + Context("with metadata objects", func() { + It("should fetch collection of objects", func(ctx SpecContext) { + By("creating an initial object") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) By("listing all objects of that type in the cluster") - deps := &unstructured.UnstructuredList{} - deps.SetGroupVersionKind(schema.GroupVersionKind{ + gvk := schema.GroupVersionKind{ Group: "apps", - Kind: "DeploymentList", Version: "v1", - }) - err = cl.List(context.Background(), deps) - Expect(err).NotTo(HaveOccurred()) + Kind: "DeploymentList", + } + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(gvk) + Expect(cl.List(ctx, metaList)).NotTo(HaveOccurred()) - Expect(deps.Items).NotTo(BeEmpty()) + By("validating that the list GVK has been preserved") + Expect(metaList.GroupVersionKind()).To(Equal(gvk)) + + By("validating that the list has the expected deployment") + Expect(metaList.Items).NotTo(BeEmpty()) hasDep := false - for _, item := range deps.Items { - if item.GetName() == dep.Name && item.GetNamespace() == dep.Namespace { + for _, item := range metaList.Items { + Expect(item.GroupVersionKind()).To(Equal(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + })) + + if item.Name == dep.Name && item.Namespace == dep.Namespace { hasDep = true break } } Expect(hasDep).To(BeTrue()) - close(done) - }, serverSideTimeoutSeconds) + }) - It("should return an empty list if there are no matching objects", func(done Done) { + 
It("should return an empty list if there are no matching objects", func(ctx SpecContext) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) By("listing all Deployments in the cluster") - deps := &unstructured.UnstructuredList{} - deps.SetGroupVersionKind(schema.GroupVersionKind{ + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ Group: "apps", - Kind: "DeploymentList", Version: "v1", + Kind: "DeploymentList", }) - Expect(cl.List(context.Background(), deps)).NotTo(HaveOccurred()) + Expect(cl.List(ctx, metaList)).NotTo(HaveOccurred()) By("validating no Deployments are returned") - Expect(deps.Items).To(BeEmpty()) + Expect(metaList.Items).To(BeEmpty()) + }) + + // TODO(seans): get label selector test working + It("should filter results by label selector", func(ctx SpecContext) { + By("creating a Deployment with the app=frontend label") + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-frontend", + Namespace: ns, + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend, err := clientset.AppsV1().Deployments(ns).Create(ctx, depFrontend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment with the app=backend label") + depBackend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-backend", + Namespace: ns, + Labels: map[string]string{"app": "backend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: 
metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend, err = clientset.AppsV1().Deployments(ns).Create(ctx, depBackend, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) - close(done) - }, serverSideTimeoutSeconds) + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) - It("should filter results by namespace selector", func(done Done) { - By("creating a Deployment in test-namespace-5") - tns1 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-5"}} - _, err := clientset.CoreV1().Namespaces().Create(tns1) + By("listing all Deployments with label app=backend") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + labels := map[string]string{"app": "backend"} + err = cl.List(ctx, metaList, client.MatchingLabels(labels)) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment with the backend label is returned") + Expect(metaList.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(metaList.Items))) + actual := metaList.Items[0] + Expect(actual.Name).To(Equal("deployment-backend")) + + deleteDeployment(ctx, depFrontend, ns) + deleteDeployment(ctx, depBackend, ns) + }) + + It("should filter results by namespace selector", func(ctx SpecContext) { + By("creating a Deployment in test-namespace-1") + tns1 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-1"}} + _, err := clientset.CoreV1().Namespaces().Create(ctx, tns1, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) depFrontend := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: "test-namespace-5"}, + ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: "test-namespace-1"}, Spec: appsv1.DeploymentSpec{ Selector: 
&metav1.LabelSelector{ MatchLabels: map[string]string{"app": "frontend"}, @@ -1785,15 +3284,15 @@ var _ = Describe("Client", func() { }, }, } - depFrontend, err = clientset.AppsV1().Deployments("test-namespace-5").Create(depFrontend) + depFrontend, err = clientset.AppsV1().Deployments("test-namespace-1").Create(ctx, depFrontend, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("creating a Deployment in test-namespace-6") - tns2 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-6"}} - _, err = clientset.CoreV1().Namespaces().Create(tns2) + By("creating a Deployment in test-namespace-2") + tns2 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-2"}} + _, err = clientset.CoreV1().Namespaces().Create(ctx, tns2, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) depBackend := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: "test-namespace-6"}, + ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: "test-namespace-2"}, Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": "backend"}, @@ -1804,37 +3303,35 @@ var _ = Describe("Client", func() { }, }, } - depBackend, err = clientset.AppsV1().Deployments("test-namespace-6").Create(depBackend) + depBackend, err = clientset.AppsV1().Deployments("test-namespace-2").Create(ctx, depBackend, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) - By("listing all Deployments in test-namespace-5") - deps := &unstructured.UnstructuredList{} - deps.SetGroupVersionKind(schema.GroupVersionKind{ + By("listing all Deployments in test-namespace-1") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ Group: "apps", - Kind: "DeploymentList", Version: "v1", + Kind: "DeploymentList", }) - err = cl.List(context.Background(), deps, 
client.InNamespace("test-namespace-5")) + err = cl.List(ctx, metaList, client.InNamespace("test-namespace-1")) Expect(err).NotTo(HaveOccurred()) - By("only the Deployment in test-namespace-5 is returned") - Expect(deps.Items).NotTo(BeEmpty()) - Expect(1).To(Equal(len(deps.Items))) - actual := deps.Items[0] - Expect(actual.GetName()).To(Equal("deployment-frontend")) - - deleteDeployment(depFrontend, "test-namespace-5") - deleteDeployment(depBackend, "test-namespace-6") - deleteNamespace(tns1) - deleteNamespace(tns2) + By("only the Deployment in test-namespace-1 is returned") + Expect(metaList.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(metaList.Items))) + actual := metaList.Items[0] + Expect(actual.Name).To(Equal("deployment-frontend")) - close(done) - }, serverSideTimeoutSeconds) + deleteDeployment(ctx, depFrontend, "test-namespace-1") + deleteDeployment(ctx, depBackend, "test-namespace-2") + deleteNamespace(ctx, tns1) + deleteNamespace(ctx, tns2) + }) - It("should filter results by field selector", func(done Done) { + It("should filter results by field selector", func(ctx SpecContext) { By("creating a Deployment with name deployment-frontend") depFrontend := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: ns}, @@ -1848,7 +3345,7 @@ var _ = Describe("Client", func() { }, }, } - depFrontend, err := clientset.AppsV1().Deployments(ns).Create(depFrontend) + depFrontend, err := clientset.AppsV1().Deployments(ns).Create(ctx, depFrontend, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("creating a Deployment with name deployment-backend") @@ -1864,44 +3361,42 @@ var _ = Describe("Client", func() { }, }, } - depBackend, err = clientset.AppsV1().Deployments(ns).Create(depBackend) + depBackend, err = clientset.AppsV1().Deployments(ns).Create(ctx, depBackend, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) By("listing all 
Deployments with field metadata.name=deployment-backend") - deps := &unstructured.UnstructuredList{} - deps.SetGroupVersionKind(schema.GroupVersionKind{ + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ Group: "apps", - Kind: "DeploymentList", Version: "v1", + Kind: "DeploymentList", }) - err = cl.List(context.Background(), deps, - client.MatchingField("metadata.name", "deployment-backend")) + err = cl.List(ctx, metaList, + client.MatchingFields{"metadata.name": "deployment-backend"}) Expect(err).NotTo(HaveOccurred()) By("only the Deployment with the backend field is returned") - Expect(deps.Items).NotTo(BeEmpty()) - Expect(1).To(Equal(len(deps.Items))) - actual := deps.Items[0] - Expect(actual.GetName()).To(Equal("deployment-backend")) - - deleteDeployment(depFrontend, ns) - deleteDeployment(depBackend, ns) + Expect(metaList.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(metaList.Items))) + actual := metaList.Items[0] + Expect(actual.Name).To(Equal("deployment-backend")) - close(done) - }, serverSideTimeoutSeconds) + deleteDeployment(ctx, depFrontend, ns) + deleteDeployment(ctx, depBackend, ns) + }) - It("should filter results by namespace selector and label selector", func(done Done) { - By("creating a Deployment in test-namespace-7 with the app=frontend label") - tns3 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-7"}} - _, err := clientset.CoreV1().Namespaces().Create(tns3) + It("should filter results by namespace selector and label selector", func(ctx SpecContext) { + By("creating a Deployment in test-namespace-3 with the app=frontend label") + tns3 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-3"}} + _, err := clientset.CoreV1().Namespaces().Create(ctx, tns3, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) depFrontend3 := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "deployment-frontend", - Namespace: "test-namespace-7", + 
Namespace: "test-namespace-3", Labels: map[string]string{"app": "frontend"}, }, Spec: appsv1.DeploymentSpec{ @@ -1914,14 +3409,14 @@ var _ = Describe("Client", func() { }, }, } - depFrontend3, err = clientset.AppsV1().Deployments("test-namespace-7").Create(depFrontend3) + depFrontend3, err = clientset.AppsV1().Deployments("test-namespace-3").Create(ctx, depFrontend3, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("creating a Deployment in test-namespace-7 with the app=backend label") + By("creating a Deployment in test-namespace-3 with the app=backend label") depBackend3 := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "deployment-backend", - Namespace: "test-namespace-7", + Namespace: "test-namespace-3", Labels: map[string]string{"app": "backend"}, }, Spec: appsv1.DeploymentSpec{ @@ -1934,17 +3429,17 @@ var _ = Describe("Client", func() { }, }, } - depBackend3, err = clientset.AppsV1().Deployments("test-namespace-7").Create(depBackend3) + depBackend3, err = clientset.AppsV1().Deployments("test-namespace-3").Create(ctx, depBackend3, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("creating a Deployment in test-namespace-8 with the app=frontend label") - tns4 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-8"}} - _, err = clientset.CoreV1().Namespaces().Create(tns4) + By("creating a Deployment in test-namespace-4 with the app=frontend label") + tns4 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-4"}} + _, err = clientset.CoreV1().Namespaces().Create(ctx, tns4, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) depFrontend4 := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "deployment-frontend", - Namespace: "test-namespace-8", + Namespace: "test-namespace-4", Labels: map[string]string{"app": "frontend"}, }, Spec: appsv1.DeploymentSpec{ @@ -1957,54 +3452,165 @@ var _ = Describe("Client", func() { }, }, } - depFrontend4, err = 
clientset.AppsV1().Deployments("test-namespace-8").Create(depFrontend4) + depFrontend4, err = clientset.AppsV1().Deployments("test-namespace-4").Create(ctx, depFrontend4, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in test-namespace-3 with label app=frontend") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + labels := map[string]string{"app": "frontend"} + err = cl.List(ctx, metaList, + client.InNamespace("test-namespace-3"), + client.MatchingLabels(labels), + ) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment in test-namespace-3 with label app=frontend is returned") + Expect(metaList.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(metaList.Items))) + actual := metaList.Items[0] + Expect(actual.Name).To(Equal("deployment-frontend")) + Expect(actual.Namespace).To(Equal("test-namespace-3")) + + deleteDeployment(ctx, depFrontend3, "test-namespace-3") + deleteDeployment(ctx, depBackend3, "test-namespace-3") + deleteDeployment(ctx, depFrontend4, "test-namespace-4") + deleteNamespace(ctx, tns3) + deleteNamespace(ctx, tns4) + }) + + It("should filter results using limit and continue options", func(ctx SpecContext) { + makeDeployment := func(suffix string) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("deployment-%s", suffix), + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + } + + By("creating 4 deployments") + dep1 := makeDeployment("1") + dep1, err := 
clientset.AppsV1().Deployments(ns).Create(ctx, dep1, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep1, ns) + + dep2 := makeDeployment("2") + dep2, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep2, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep2, ns) + + dep3 := makeDeployment("3") + dep3, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep3, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep3, ns) + + dep4 := makeDeployment("4") + dep4, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep4, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(ctx, dep4, ns) cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) - By("listing all Deployments in test-namespace-8 with label app=frontend") - deps := &unstructured.UnstructuredList{} - deps.SetGroupVersionKind(schema.GroupVersionKind{ + By("listing 1 deployment when limit=1 is used") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ Group: "apps", + Version: "v1", Kind: "DeploymentList", + }) + err = cl.List(ctx, metaList, + client.Limit(1), + ) + Expect(err).NotTo(HaveOccurred()) + + Expect(metaList.Items).To(HaveLen(1)) + Expect(metaList.Continue).NotTo(BeEmpty()) + Expect(metaList.Items[0].Name).To(Equal(dep1.Name)) + + continueToken := metaList.Continue + + By("listing the next deployment when previous continuation token is used and limit=1") + metaList = &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", Version: "v1", + Kind: "DeploymentList", }) - labels := map[string]string{"app": "frontend"} - err = cl.List(context.Background(), deps, - client.InNamespace("test-namespace-7"), client.MatchingLabels(labels)) + err = cl.List(ctx, metaList, + client.Limit(1), + client.Continue(continueToken), + ) 
Expect(err).NotTo(HaveOccurred()) - By("only the Deployment in test-namespace-7 with label app=frontend is returned") - Expect(deps.Items).NotTo(BeEmpty()) - Expect(1).To(Equal(len(deps.Items))) - actual := deps.Items[0] - Expect(actual.GetName()).To(Equal("deployment-frontend")) - Expect(actual.GetNamespace()).To(Equal("test-namespace-7")) + Expect(metaList.Items).To(HaveLen(1)) + Expect(metaList.Continue).NotTo(BeEmpty()) + Expect(metaList.Items[0].Name).To(Equal(dep2.Name)) - deleteDeployment(depFrontend3, "test-namespace-7") - deleteDeployment(depBackend3, "test-namespace-7") - deleteDeployment(depFrontend4, "test-namespace-8") - deleteNamespace(tns3) - deleteNamespace(tns4) + continueToken = metaList.Continue + + By("listing the 2 remaining deployments when previous continuation token is used without a limit") + metaList = &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + err = cl.List(ctx, metaList, + client.Continue(continueToken), + ) + Expect(err).NotTo(HaveOccurred()) - close(done) - }, serverSideTimeoutSeconds) + Expect(metaList.Items).To(HaveLen(2)) + Expect(metaList.Continue).To(BeEmpty()) + Expect(metaList.Items[0].Name).To(Equal(dep3.Name)) + Expect(metaList.Items[1].Name).To(Equal(dep4.Name)) + }) PIt("should fail if the object doesn't have meta", func() { }) + + PIt("should fail if the object cannot be mapped to a GVK", func() { + + }) + + PIt("should fail if the GVK cannot be mapped to a Resource", func() { + + }) }) }) Describe("CreateOptions", func() { It("should allow setting DryRun to 'all'", func() { co := &client.CreateOptions{} - client.CreateDryRunAll(co) + client.DryRunAll.ApplyToCreate(co) all := []string{metav1.DryRunAll} Expect(co.AsCreateOptions().DryRun).To(Equal(all)) }) + It("should allow setting the field manager", func() { + po := &client.CreateOptions{} + client.FieldOwner("some-owner").ApplyToCreate(po) + 
Expect(po.AsCreateOptions().FieldManager).To(Equal("some-owner")) + }) + It("should produce empty metav1.CreateOptions if nil", func() { var co *client.CreateOptions Expect(co.AsCreateOptions()).To(Equal(&metav1.CreateOptions{})) @@ -2016,7 +3622,7 @@ var _ = Describe("Client", func() { Describe("DeleteOptions", func() { It("should allow setting GracePeriodSeconds", func() { do := &client.DeleteOptions{} - client.GracePeriodSeconds(1)(do) + client.GracePeriodSeconds(1).ApplyToDelete(do) gp := int64(1) Expect(do.AsDeleteOptions().GracePeriodSeconds).To(Equal(&gp)) }) @@ -2024,18 +3630,25 @@ var _ = Describe("Client", func() { It("should allow setting Precondition", func() { do := &client.DeleteOptions{} pc := metav1.NewUIDPreconditions("uid") - client.Preconditions(pc)(do) + client.Preconditions(*pc).ApplyToDelete(do) Expect(do.AsDeleteOptions().Preconditions).To(Equal(pc)) Expect(do.Preconditions).To(Equal(pc)) }) It("should allow setting PropagationPolicy", func() { do := &client.DeleteOptions{} - client.PropagationPolicy(metav1.DeletePropagationForeground)(do) + client.PropagationPolicy(metav1.DeletePropagationForeground).ApplyToDelete(do) dp := metav1.DeletePropagationForeground Expect(do.AsDeleteOptions().PropagationPolicy).To(Equal(&dp)) }) + It("should allow setting DryRun", func() { + do := &client.DeleteOptions{} + client.DryRunAll.ApplyToDelete(do) + all := []string{metav1.DryRunAll} + Expect(do.AsDeleteOptions().DryRun).To(Equal(all)) + }) + It("should produce empty metav1.DeleteOptions if nil", func() { var do *client.DeleteOptions Expect(do.AsDeleteOptions()).To(Equal(&metav1.DeleteOptions{})) @@ -2048,9 +3661,9 @@ var _ = Describe("Client", func() { pc := metav1.NewUIDPreconditions("uid") dp := metav1.DeletePropagationForeground do := &client.DeleteOptions{} - do.ApplyOptions([]client.DeleteOptionFunc{ + do.ApplyOptions([]client.DeleteOption{ client.GracePeriodSeconds(gp), - client.Preconditions(pc), + client.Preconditions(*pc), 
client.PropagationPolicy(dp), }) Expect(do.GracePeriodSeconds).To(Equal(&gp)) @@ -2059,93 +3672,164 @@ var _ = Describe("Client", func() { }) }) - Describe("ListOptions", func() { - It("should be able to set a LabelSelector", func() { - lo := &client.ListOptions{} - err := lo.SetLabelSelector("foo=bar") - Expect(err).NotTo(HaveOccurred()) - Expect(lo.LabelSelector.String()).To(Equal("foo=bar")) + Describe("DeleteCollectionOptions", func() { + It("should be convertable to list options", func() { + gp := int64(1) + do := &client.DeleteAllOfOptions{} + do.ApplyOptions([]client.DeleteAllOfOption{ + client.GracePeriodSeconds(gp), + client.MatchingLabels{"foo": "bar"}, + }) + + listOpts := do.AsListOptions() + Expect(listOpts).NotTo(BeNil()) + Expect(listOpts.LabelSelector).To(Equal("foo=bar")) }) - It("should be able to set a FieldSelector", func() { - lo := &client.ListOptions{} - err := lo.SetFieldSelector("field1=bar") - Expect(err).NotTo(HaveOccurred()) - Expect(lo.FieldSelector.String()).To(Equal("field1=bar")) + It("should be convertable to delete options", func() { + gp := int64(1) + do := &client.DeleteAllOfOptions{} + do.ApplyOptions([]client.DeleteAllOfOption{ + client.GracePeriodSeconds(gp), + client.MatchingLabels{"foo": "bar"}, + }) + + deleteOpts := do.AsDeleteOptions() + Expect(deleteOpts).NotTo(BeNil()) + Expect(deleteOpts.GracePeriodSeconds).To(Equal(&gp)) }) + }) - It("should be converted to metav1.ListOptions", func() { - lo := &client.ListOptions{} - labels := map[string]string{"foo": "bar"} - mlo := lo.MatchingLabels(labels). - MatchingField("field1", "bar"). - InNamespace("test-namespace"). 
- AsListOptions() + Describe("GetOptions", func() { + It("should be convertable to metav1.GetOptions", func() { + o := (&client.GetOptions{}).ApplyOptions([]client.GetOption{ + &client.GetOptions{Raw: &metav1.GetOptions{ResourceVersion: "RV0"}}, + }) + mo := o.AsGetOptions() + Expect(mo).NotTo(BeNil()) + Expect(mo.ResourceVersion).To(Equal("RV0")) + }) + + It("should produce empty metav1.GetOptions if nil", func() { + var o *client.GetOptions + Expect(o.AsGetOptions()).To(Equal(&metav1.GetOptions{})) + o = &client.GetOptions{} + Expect(o.AsGetOptions()).To(Equal(&metav1.GetOptions{})) + }) + }) + + Describe("ListOptions", func() { + It("should be convertable to metav1.ListOptions", func() { + lo := (&client.ListOptions{}).ApplyOptions([]client.ListOption{ + client.MatchingFields{"field1": "bar"}, + client.InNamespace("test-namespace"), + client.MatchingLabels{"foo": "bar"}, + client.Limit(1), + client.Continue("foo"), + }) + mlo := lo.AsListOptions() Expect(mlo).NotTo(BeNil()) Expect(mlo.LabelSelector).To(Equal("foo=bar")) Expect(mlo.FieldSelector).To(Equal("field1=bar")) + Expect(mlo.Limit).To(Equal(int64(1))) + Expect(mlo.Continue).To(Equal("foo")) }) - It("should be able to set MatchingLabels", func() { + It("should be populated by MatchingLabels", func() { lo := &client.ListOptions{} - Expect(lo.LabelSelector).To(BeNil()) - labels := map[string]string{"foo": "bar"} - lo = lo.MatchingLabels(labels) + client.MatchingLabels{"foo": "bar"}.ApplyToList(lo) + Expect(lo).NotTo(BeNil()) Expect(lo.LabelSelector.String()).To(Equal("foo=bar")) }) - It("should be able to set MatchingField", func() { + It("should be populated by MatchingField", func() { lo := &client.ListOptions{} - Expect(lo.FieldSelector).To(BeNil()) - lo = lo.MatchingField("field1", "bar") + client.MatchingFields{"field1": "bar"}.ApplyToList(lo) + Expect(lo).NotTo(BeNil()) Expect(lo.FieldSelector.String()).To(Equal("field1=bar")) }) - It("should be able to set InNamespace", func() { + It("should be 
populated by InNamespace", func() { lo := &client.ListOptions{} - lo = lo.InNamespace("test-namespace") - Expect(lo.Namespace).To(Equal("test-namespace")) + client.InNamespace("test").ApplyToList(lo) + Expect(lo).NotTo(BeNil()) + Expect(lo.Namespace).To(Equal("test")) }) - It("should be created from MatchingLabels", func() { - labels := map[string]string{"foo": "bar"} - lo := &client.ListOptions{} - client.MatchingLabels(labels)(lo) - Expect(lo).NotTo(BeNil()) - Expect(lo.LabelSelector.String()).To(Equal("foo=bar")) + It("should produce empty metav1.ListOptions if nil", func() { + var do *client.ListOptions + Expect(do.AsListOptions()).To(Equal(&metav1.ListOptions{})) + do = &client.ListOptions{} + Expect(do.AsListOptions()).To(Equal(&metav1.ListOptions{})) }) - It("should be created from MatchingField", func() { + It("should be populated by Limit", func() { lo := &client.ListOptions{} - client.MatchingField("field1", "bar")(lo) + client.Limit(1).ApplyToList(lo) Expect(lo).NotTo(BeNil()) - Expect(lo.FieldSelector.String()).To(Equal("field1=bar")) + Expect(lo.Limit).To(Equal(int64(1))) }) - It("should be created from InNamespace", func() { - lo := &client.ListOptions{} - client.InNamespace("test")(lo) - Expect(lo).NotTo(BeNil()) - Expect(lo.Namespace).To(Equal("test")) + It("should ignore Limit when converted to metav1.ListOptions and watch is true", func() { + lo := &client.ListOptions{ + Raw: &metav1.ListOptions{Watch: true}, + } + lo.ApplyOptions([]client.ListOption{ + client.Limit(1), + }) + mlo := lo.AsListOptions() + Expect(mlo).NotTo(BeNil()) + Expect(mlo.Limit).To(BeZero()) }) - It("should allow pre-built ListOptions", func() { + It("should be populated by Continue", func() { lo := &client.ListOptions{} - newLo := &client.ListOptions{} - client.UseListOptions(newLo.InNamespace("test"))(lo) + client.Continue("foo").ApplyToList(lo) Expect(lo).NotTo(BeNil()) - Expect(lo.Namespace).To(Equal("test")) + Expect(lo.Continue).To(Equal("foo")) + }) + + It("should 
ignore Continue token when converted to metav1.ListOptions and watch is true", func() { + lo := &client.ListOptions{ + Raw: &metav1.ListOptions{Watch: true}, + } + lo.ApplyOptions([]client.ListOption{ + client.Continue("foo"), + }) + mlo := lo.AsListOptions() + Expect(mlo).NotTo(BeNil()) + Expect(mlo.Continue).To(BeEmpty()) + }) + + It("should ignore both Limit and Continue token when converted to metav1.ListOptions and watch is true", func() { + lo := &client.ListOptions{ + Raw: &metav1.ListOptions{Watch: true}, + } + lo.ApplyOptions([]client.ListOption{ + client.Limit(1), + client.Continue("foo"), + }) + mlo := lo.AsListOptions() + Expect(mlo).NotTo(BeNil()) + Expect(mlo.Limit).To(BeZero()) + Expect(mlo.Continue).To(BeEmpty()) }) }) Describe("UpdateOptions", func() { It("should allow setting DryRun to 'all'", func() { uo := &client.UpdateOptions{} - client.UpdateDryRunAll(uo) + client.DryRunAll.ApplyToUpdate(uo) all := []string{metav1.DryRunAll} Expect(uo.AsUpdateOptions().DryRun).To(Equal(all)) }) + It("should allow setting the field manager", func() { + po := &client.UpdateOptions{} + client.FieldOwner("some-owner").ApplyToUpdate(po) + Expect(po.AsUpdateOptions().FieldManager).To(Equal("some-owner")) + }) + It("should produce empty metav1.UpdateOptions if nil", func() { var co *client.UpdateOptions Expect(co.AsUpdateOptions()).To(Equal(&metav1.UpdateOptions{})) @@ -2157,14 +3841,14 @@ var _ = Describe("Client", func() { Describe("PatchOptions", func() { It("should allow setting DryRun to 'all'", func() { po := &client.PatchOptions{} - client.PatchDryRunAll(po) + client.DryRunAll.ApplyToPatch(po) all := []string{metav1.DryRunAll} Expect(po.AsPatchOptions().DryRun).To(Equal(all)) }) It("should allow setting Force to 'true'", func() { po := &client.PatchOptions{} - client.ForceOwnership(po) + client.ForceOwnership.ApplyToPatch(po) mpo := po.AsPatchOptions() Expect(mpo.Force).NotTo(BeNil()) Expect(*mpo.Force).To(BeTrue()) @@ -2172,7 +3856,7 @@ var _ = 
Describe("Client", func() { It("should allow setting the field manager", func() { po := &client.PatchOptions{} - client.FieldOwner("some-owner")(po) + client.FieldOwner("some-owner").ApplyToPatch(po) Expect(po.AsPatchOptions().FieldManager).To(Equal("some-owner")) }) @@ -2185,75 +3869,161 @@ var _ = Describe("Client", func() { }) }) -var _ = Describe("DelegatingReader", func() { +var _ = Describe("ClientWithCache", func() { Describe("Get", func() { - It("should call cache reader when structured object", func() { + It("should call cache reader when structured object", func(ctx SpecContext) { cachedReader := &fakeReader{} - clientReader := &fakeReader{} - dReader := client.DelegatingReader{ - CacheReader: cachedReader, - ClientReader: clientReader, - } + cl, err := client.New(cfg, client.Options{ + Cache: &client.CacheOptions{ + Reader: cachedReader, + }, + }) + Expect(err).NotTo(HaveOccurred()) var actual appsv1.Deployment key := client.ObjectKey{Namespace: "ns", Name: "name"} - Expect(dReader.Get(context.TODO(), key, &actual)).To(Succeed()) + Expect(cl.Get(ctx, key, &actual)).To(Succeed()) Expect(1).To(Equal(cachedReader.Called)) - Expect(0).To(Equal(clientReader.Called)) }) - It("should call client reader when structured object", func() { - cachedReader := &fakeReader{} - clientReader := &fakeReader{} - dReader := client.DelegatingReader{ - CacheReader: cachedReader, - ClientReader: clientReader, - } - var actual unstructured.Unstructured - key := client.ObjectKey{Namespace: "ns", Name: "name"} - Expect(dReader.Get(context.TODO(), key, &actual)).To(Succeed()) - Expect(0).To(Equal(cachedReader.Called)) - Expect(1).To(Equal(clientReader.Called)) + + When("getting unstructured objects", func() { + var dep *appsv1.Deployment + + BeforeEach(func(ctx SpecContext) { + dep = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment1", + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + 
MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "x", Image: "x"}}}, + }, + }, + } + var err error + dep, err = clientset.AppsV1().Deployments("default").Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + AfterEach(func(ctx SpecContext) { + Expect(clientset.AppsV1().Deployments("default").Delete( + ctx, + dep.Name, + metav1.DeleteOptions{}, + )).To(Succeed()) + }) + It("should call client reader when not cached", func(ctx SpecContext) { + cachedReader := &fakeReader{} + cl, err := client.New(cfg, client.Options{ + Cache: &client.CacheOptions{ + Reader: cachedReader, + }, + }) + Expect(err).NotTo(HaveOccurred()) + + actual := &unstructured.Unstructured{} + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + actual.SetName(dep.Name) + key := client.ObjectKey{Namespace: dep.Namespace, Name: dep.Name} + Expect(cl.Get(ctx, key, actual)).To(Succeed()) + Expect(0).To(Equal(cachedReader.Called)) + }) + It("should call cache reader when cached", func(ctx SpecContext) { + cachedReader := &fakeReader{} + cl, err := client.New(cfg, client.Options{ + Cache: &client.CacheOptions{ + Reader: cachedReader, + Unstructured: true, + }, + }) + Expect(err).NotTo(HaveOccurred()) + + actual := &unstructured.Unstructured{} + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + actual.SetName(dep.Name) + key := client.ObjectKey{Namespace: dep.Namespace, Name: dep.Name} + Expect(cl.Get(ctx, key, actual)).To(Succeed()) + Expect(1).To(Equal(cachedReader.Called)) + }) }) }) Describe("List", func() { - It("should call cache reader when structured object", func() { + It("should call cache reader when structured object", func(ctx SpecContext) { cachedReader := 
&fakeReader{} - clientReader := &fakeReader{} - dReader := client.DelegatingReader{ - CacheReader: cachedReader, - ClientReader: clientReader, - } + cl, err := client.New(cfg, client.Options{ + Cache: &client.CacheOptions{ + Reader: cachedReader, + }, + }) + Expect(err).NotTo(HaveOccurred()) var actual appsv1.DeploymentList - Expect(dReader.List(context.Background(), &actual)).To(Succeed()) + Expect(cl.List(ctx, &actual)).To(Succeed()) Expect(1).To(Equal(cachedReader.Called)) - Expect(0).To(Equal(clientReader.Called)) - }) - It("should call client reader when structured object", func() { - cachedReader := &fakeReader{} - clientReader := &fakeReader{} - dReader := client.DelegatingReader{ - CacheReader: cachedReader, - ClientReader: clientReader, - } - var actual unstructured.UnstructuredList - Expect(dReader.List(context.Background(), &actual)).To(Succeed()) - Expect(0).To(Equal(cachedReader.Called)) - Expect(1).To(Equal(clientReader.Called)) + When("listing unstructured objects", func() { + It("should call client reader when not cached", func(ctx SpecContext) { + cachedReader := &fakeReader{} + cl, err := client.New(cfg, client.Options{ + Cache: &client.CacheOptions{ + Reader: cachedReader, + }, + }) + Expect(err).NotTo(HaveOccurred()) + + actual := &unstructured.UnstructuredList{} + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + Expect(cl.List(ctx, actual)).To(Succeed()) + Expect(0).To(Equal(cachedReader.Called)) + }) + It("should call cache reader when cached", func(ctx SpecContext) { + cachedReader := &fakeReader{} + cl, err := client.New(cfg, client.Options{ + Cache: &client.CacheOptions{ + Reader: cachedReader, + Unstructured: true, + }, + }) + Expect(err).NotTo(HaveOccurred()) + actual := &unstructured.UnstructuredList{} + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "DeploymentList", + Version: "v1", + }) + Expect(cl.List(ctx, actual)).To(Succeed()) + 
Expect(1).To(Equal(cachedReader.Called)) + }) }) }) }) var _ = Describe("Patch", func() { - Describe("CreateMergePatch", func() { + Describe("MergeFrom", func() { var cm *corev1.ConfigMap BeforeEach(func() { cm = &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "cm", + Namespace: metav1.NamespaceDefault, + Name: "cm", + ResourceVersion: "10", }, } }) @@ -2282,6 +4052,109 @@ var _ = Describe("Patch", func() { By("returning a patch with data only containing the annotation change") Expect(data).To(Equal([]byte(fmt.Sprintf(`{"metadata":{"annotations":{"%s":"%s"}}}`, annotationKey, annotationValue)))) }) + + It("creates a merge patch with the modifications applied during the mutation, using optimistic locking", func() { + const ( + annotationKey = "test" + annotationValue = "foo" + ) + + By("creating a merge patch") + patch := client.MergeFromWithOptions(cm.DeepCopy(), client.MergeFromWithOptimisticLock{}) + + By("returning a patch with type MergePatch") + Expect(patch.Type()).To(Equal(types.MergePatchType)) + + By("retrieving modifying the config map") + metav1.SetMetaDataAnnotation(&cm.ObjectMeta, annotationKey, annotationValue) + + By("computing the patch data") + data, err := patch.Data(cm) + + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning a patch with data containing the annotation change and the resourceVersion change") + Expect(data).To(Equal([]byte(fmt.Sprintf(`{"metadata":{"annotations":{"%s":"%s"},"resourceVersion":"%s"}}`, annotationKey, annotationValue, cm.ResourceVersion)))) + }) + }) + + Describe("StrategicMergeFrom", func() { + var dep *appsv1.Deployment + + BeforeEach(func() { + dep = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "dep", + ResourceVersion: "10", + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{Containers: []corev1.Container{{ + Name: "main", + Image: "foo:v1", + 
}, { + Name: "sidecar", + Image: "bar:v1", + }}}, + }, + }, + } + }) + + It("creates a strategic merge patch with the modifications applied during the mutation", func() { + By("creating a strategic merge patch") + patch := client.StrategicMergeFrom(dep.DeepCopy()) + + By("returning a patch with type StrategicMergePatchType") + Expect(patch.Type()).To(Equal(types.StrategicMergePatchType)) + + By("updating the main container's image") + for i, c := range dep.Spec.Template.Spec.Containers { + if c.Name == "main" { + c.Image = "foo:v2" + } + dep.Spec.Template.Spec.Containers[i] = c + } + + By("computing the patch data") + data, err := patch.Data(dep) + + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning a patch with data only containing the image change") + Expect(data).To(Equal([]byte(`{"spec":{"template":{"spec":{"$setElementOrder/containers":[{"name":"main"},` + + `{"name":"sidecar"}],"containers":[{"image":"foo:v2","name":"main"}]}}}}`))) + }) + + It("creates a strategic merge patch with the modifications applied during the mutation, using optimistic locking", func() { + By("creating a strategic merge patch") + patch := client.StrategicMergeFrom(dep.DeepCopy(), client.MergeFromWithOptimisticLock{}) + + By("returning a patch with type StrategicMergePatchType") + Expect(patch.Type()).To(Equal(types.StrategicMergePatchType)) + + By("updating the main container's image") + for i, c := range dep.Spec.Template.Spec.Containers { + if c.Name == "main" { + c.Image = "foo:v2" + } + dep.Spec.Template.Spec.Containers[i] = c + } + + By("computing the patch data") + data, err := patch.Data(dep) + + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning a patch with data containing the image change and the resourceVersion change") + Expect(data).To(Equal([]byte(fmt.Sprintf(`{"metadata":{"resourceVersion":"%s"},`+ + 
`"spec":{"template":{"spec":{"$setElementOrder/containers":[{"name":"main"},{"name":"sidecar"}],"containers":[{"image":"foo:v2","name":"main"}]}}}}`, + dep.ResourceVersion)))) + }) }) }) @@ -2311,16 +4184,65 @@ var _ = Describe("IgnoreNotFound", func() { }) }) +var _ = Describe("IgnoreAlreadyExists", func() { + It("should return nil on a 'AlreadyExists' error", func() { + By("creating a AlreadyExists error") + err := apierrors.NewAlreadyExists(schema.GroupResource{}, "") + + By("returning no error") + Expect(client.IgnoreAlreadyExists(err)).To(Succeed()) + }) + + It("should return the error on a status other than already exists", func() { + By("creating a BadRequest error") + err := apierrors.NewBadRequest("") + + By("returning an error") + Expect(client.IgnoreAlreadyExists(err)).To(HaveOccurred()) + }) + + It("should return the error on a non-status error", func() { + By("creating an fmt error") + err := fmt.Errorf("arbitrary error") + + By("returning an error") + Expect(client.IgnoreAlreadyExists(err)).To(HaveOccurred()) + }) +}) + type fakeReader struct { Called int } -func (f *fakeReader) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error { - f.Called = f.Called + 1 +func (f *fakeReader) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + f.Called++ return nil } -func (f *fakeReader) List(ctx context.Context, list runtime.Object, opts ...client.ListOptionFunc) error { - f.Called = f.Called + 1 +func (f *fakeReader) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + f.Called++ return nil } + +type fakeUncachedReader struct { + Called int +} + +func (f *fakeUncachedReader) Get(_ context.Context, _ client.ObjectKey, _ client.Object, opts ...client.GetOption) error { + f.Called++ + return &cache.ErrResourceNotCached{} +} + +func (f *fakeUncachedReader) List(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { + f.Called++ + return 
&cache.ErrResourceNotCached{} +} + +func toUnstructured(o client.Object) (*unstructured.Unstructured, error) { + serialized, err := json.Marshal(o) + if err != nil { + return nil, err + } + u := &unstructured.Unstructured{} + return u, json.Unmarshal(serialized, u) +} diff --git a/pkg/client/codec.go b/pkg/client/codec.go new file mode 100644 index 0000000000..9c2923106c --- /dev/null +++ b/pkg/client/codec.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "errors" + "net/url" + + "k8s.io/apimachinery/pkg/conversion/queryparams" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var _ runtime.ParameterCodec = noConversionParamCodec{} + +// noConversionParamCodec is a no-conversion codec for serializing parameters into URL query strings. +// it's useful in scenarios with the unstructured client and arbitrary resources. 
+type noConversionParamCodec struct{} + +func (noConversionParamCodec) EncodeParameters(obj runtime.Object, to schema.GroupVersion) (url.Values, error) { + return queryparams.Convert(obj) +} + +func (noConversionParamCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into runtime.Object) error { + return errors.New("DecodeParameters not implemented on noConversionParamCodec") +} diff --git a/pkg/client/config/config.go b/pkg/client/config/config.go index 8d022c5cfa..1c39f4d854 100644 --- a/pkg/client/config/config.go +++ b/pkg/client/config/config.go @@ -25,33 +25,46 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) +// KubeconfigFlagName is the name of the kubeconfig flag +const KubeconfigFlagName = "kubeconfig" + var ( - kubeconfig, apiServerURL string - log = logf.RuntimeLog.WithName("client").WithName("config") + kubeconfig string + log = logf.RuntimeLog.WithName("client").WithName("config") ) +// init registers the "kubeconfig" flag to the default command line FlagSet. +// TODO: This should be removed, as it potentially leads to redefined flag errors for users, if they already +// have registered the "kubeconfig" flag to the command line FlagSet in other parts of their code. func init() { - // TODO: Fix this to allow double vendoring this library but still register flags on behalf of users - flag.StringVar(&kubeconfig, "kubeconfig", "", - "Paths to a kubeconfig. Only required if out-of-cluster.") - - // This flag is deprecated, it'll be removed in a future iteration, please switch to --kubeconfig. - flag.StringVar(&apiServerURL, "master", "", - "(Deprecated: switch to `--kubeconfig`) The address of the Kubernetes API server. Overrides any value in kubeconfig. 
"+ - "Only required if out-of-cluster.") + RegisterFlags(flag.CommandLine) +} + +// RegisterFlags registers flag variables to the given FlagSet if not already registered. +// It uses the default command line FlagSet, if none is provided. Currently, it only registers the kubeconfig flag. +func RegisterFlags(fs *flag.FlagSet) { + if fs == nil { + fs = flag.CommandLine + } + if f := fs.Lookup(KubeconfigFlagName); f != nil { + kubeconfig = f.Value.String() + } else { + fs.StringVar(&kubeconfig, KubeconfigFlagName, "", "Paths to a kubeconfig. Only required if out-of-cluster.") + } } // GetConfig creates a *rest.Config for talking to a Kubernetes API server. // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running // in cluster and use the cluster provided kubeconfig. // -// It also applies saner defaults for QPS and burst based on the Kubernetes -// controller manager defaults (20 QPS, 30 burst) +// The returned `*rest.Config` has client-side ratelimting disabled as we can rely on API priority and +// fairness. Set its QPS to a value equal or bigger than 0 to re-enable it. // -// Config precedence +// Config precedence: // // * --kubeconfig flag pointing at a file // @@ -59,50 +72,106 @@ func init() { // // * In-cluster config if running in cluster // -// * $HOME/.kube/config if exists +// * $HOME/.kube/config if exists. func GetConfig() (*rest.Config, error) { - cfg, err := loadConfig() + return GetConfigWithContext("") +} + +// GetConfigWithContext creates a *rest.Config for talking to a Kubernetes API server with a specific context. +// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. +// +// The returned `*rest.Config` has client-side ratelimting disabled as we can rely on API priority and +// fairness. Set its QPS to a value equal or bigger than 0 to re-enable it. 
+// +// Config precedence: +// +// * --kubeconfig flag pointing at a file +// +// * KUBECONFIG environment variable pointing at a file +// +// * In-cluster config if running in cluster +// +// * $HOME/.kube/config if exists. +func GetConfigWithContext(context string) (*rest.Config, error) { + cfg, err := loadConfig(context) if err != nil { return nil, err } - if cfg.QPS == 0.0 { - cfg.QPS = 20.0 - cfg.Burst = 30.0 + // Disable client-side ratelimer by default, we can rely on + // API priority and fairness + cfg.QPS = -1 } - return cfg, nil } -// loadConfig loads a REST Config as per the rules specified in GetConfig -func loadConfig() (*rest.Config, error) { +// loadInClusterConfig is a function used to load the in-cluster +// Kubernetes client config. This variable makes is possible to +// test the precedence of loading the config. +var loadInClusterConfig = rest.InClusterConfig + +// loadConfig loads a REST Config as per the rules specified in GetConfig. +func loadConfig(context string) (config *rest.Config, configErr error) { // If a flag is specified with the config location, use that if len(kubeconfig) > 0 { - return clientcmd.BuildConfigFromFlags(apiServerURL, kubeconfig) - } - // If an env variable is specified with the config locaiton, use that - if len(os.Getenv("KUBECONFIG")) > 0 { - return clientcmd.BuildConfigFromFlags(apiServerURL, os.Getenv("KUBECONFIG")) + return loadConfigWithContext("", &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, context) } - // If no explicit location, try the in-cluster config - if c, err := rest.InClusterConfig(); err == nil { - return c, nil - } - // If no in-cluster config, try the default location in the user's home directory - if usr, err := user.Current(); err == nil { - if c, err := clientcmd.BuildConfigFromFlags( - "", filepath.Join(usr.HomeDir, ".kube", "config")); err == nil { + + // If the recommended kubeconfig env variable is not specified, + // try the in-cluster config. 
+ kubeconfigPath := os.Getenv(clientcmd.RecommendedConfigPathEnvVar) + if len(kubeconfigPath) == 0 { + c, err := loadInClusterConfig() + if err == nil { return c, nil } + + defer func() { + if configErr != nil { + log.Error(err, "unable to load in-cluster config") + } + }() + } + + // If the recommended kubeconfig env variable is set, or there + // is no in-cluster config, try the default recommended locations. + // + // NOTE: For default config file locations, upstream only checks + // $HOME for the user's home directory, but we can also try + // os/user.HomeDir when $HOME is unset. + // + // TODO(jlanford): could this be done upstream? + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + if _, ok := os.LookupEnv("HOME"); !ok { + u, err := user.Current() + if err != nil { + return nil, fmt.Errorf("could not get current user: %w", err) + } + loadingRules.Precedence = append(loadingRules.Precedence, filepath.Join(u.HomeDir, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName)) } - return nil, fmt.Errorf("could not locate a kubeconfig") + return loadConfigWithContext("", loadingRules, context) +} + +func loadConfigWithContext(apiServerURL string, loader clientcmd.ClientConfigLoader, context string) (*rest.Config, error) { + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + loader, + &clientcmd.ConfigOverrides{ + ClusterInfo: clientcmdapi.Cluster{ + Server: apiServerURL, + }, + CurrentContext: context, + }).ClientConfig() } // GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver. // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running // in cluster and use the cluster provided kubeconfig. // +// The returned `*rest.Config` has client-side ratelimting disabled as we can rely on API priority and +// fairness. Set its QPS to a value equal or bigger than 0 to re-enable it. +// // Will log an error and exit if there is an error creating the rest.Config. 
func GetConfigOrDie() *rest.Config { config, err := GetConfig() diff --git a/pkg/client/config/config_suite_test.go b/pkg/client/config/config_suite_test.go new file mode 100644 index 0000000000..626613cef4 --- /dev/null +++ b/pkg/client/config/config_suite_test.go @@ -0,0 +1,36 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestConfig(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Client Config Test Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}) diff --git a/pkg/client/config/config_test.go b/pkg/client/config/config_test.go new file mode 100644 index 0000000000..bbaeb2e2bd --- /dev/null +++ b/pkg/client/config/config_test.go @@ -0,0 +1,235 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "os" + "path/filepath" + "strings" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +type testCase struct { + text string + context string + kubeconfigFlag string + kubeconfigEnv []string + wantHost string +} + +var _ = Describe("Config", func() { + + var dir string + + origRecommendedHomeFile := clientcmd.RecommendedHomeFile + + BeforeEach(func() { + // create temporary directory for test case + var err error + dir, err = os.MkdirTemp("", "cr-test") + Expect(err).NotTo(HaveOccurred()) + + // override $HOME/.kube/config + clientcmd.RecommendedHomeFile = filepath.Join(dir, ".kubeconfig") + }) + + AfterEach(func() { + _ = os.Unsetenv(clientcmd.RecommendedConfigPathEnvVar) + kubeconfig = "" + clientcmd.RecommendedHomeFile = origRecommendedHomeFile + + err := os.RemoveAll(dir) + Expect(err).NotTo(HaveOccurred()) + }) + + Describe("GetConfigWithContext", func() { + defineTests := func(testCases []testCase) { + for _, testCase := range testCases { + tc := testCase + It(tc.text, func() { + // set global and environment configs + setConfigs(tc, dir) + + // run the test + cfg, err := GetConfigWithContext(tc.context) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg.Host).To(Equal(tc.wantHost)) + Expect(cfg.QPS).To(Equal(float32(-1))) + }) + } + } + + Context("when kubeconfig files don't exist", func() { + It("should fail", func() { + err := os.Unsetenv(clientcmd.RecommendedConfigPathEnvVar) + Expect(err).NotTo(HaveOccurred()) + + cfg, err := GetConfigWithContext("") + Expect(err).To(HaveOccurred()) + Expect(cfg).To(BeNil()) + }) + }) + + Context("when in-cluster", func() { + kubeconfigFiles := map[string]string{ + "kubeconfig-multi-context": genKubeconfig("from-multi-env-1", "from-multi-env-2"), + ".kubeconfig": 
genKubeconfig("from-home"), + } + BeforeEach(func() { + err := createFiles(kubeconfigFiles, dir) + Expect(err).NotTo(HaveOccurred()) + + // override in-cluster config loader + loadInClusterConfig = func() (*rest.Config, error) { + return &rest.Config{Host: "from-in-cluster"}, nil + } + }) + AfterEach(func() { loadInClusterConfig = rest.InClusterConfig }) + + testCases := []testCase{ + { + text: "should prefer the envvar over the in-cluster config", + kubeconfigEnv: []string{"kubeconfig-multi-context"}, + wantHost: "from-multi-env-1", + }, + { + text: "should prefer in-cluster over the recommended home file", + wantHost: "from-in-cluster", + }, + } + defineTests(testCases) + }) + + Context("when outside the cluster", func() { + kubeconfigFiles := map[string]string{ + "kubeconfig-flag": genKubeconfig("from-flag"), + "kubeconfig-multi-context": genKubeconfig("from-multi-env-1", "from-multi-env-2"), + "kubeconfig-env-1": genKubeconfig("from-env-1"), + "kubeconfig-env-2": genKubeconfig("from-env-2"), + ".kubeconfig": genKubeconfig("from-home"), + } + BeforeEach(func() { + err := createFiles(kubeconfigFiles, dir) + Expect(err).NotTo(HaveOccurred()) + }) + testCases := []testCase{ + { + text: "should use the --kubeconfig flag", + kubeconfigFlag: "kubeconfig-flag", + wantHost: "from-flag", + }, + { + text: "should use the envvar", + kubeconfigEnv: []string{"kubeconfig-multi-context"}, + wantHost: "from-multi-env-1", + }, + { + text: "should use the recommended home file", + wantHost: "from-home", + }, + { + text: "should prefer the flag over the envvar", + kubeconfigFlag: "kubeconfig-flag", + kubeconfigEnv: []string{"kubeconfig-multi-context"}, + wantHost: "from-flag", + }, + { + text: "should prefer the envvar over the recommended home file", + kubeconfigEnv: []string{"kubeconfig-multi-context"}, + wantHost: "from-multi-env-1", + }, + { + text: "should allow overriding the context", + context: "from-multi-env-2", + kubeconfigEnv: []string{"kubeconfig-multi-context"}, + 
wantHost: "from-multi-env-2", + }, + { + text: "should support a multi-value envvar", + context: "from-env-2", + kubeconfigEnv: []string{"kubeconfig-env-1", "kubeconfig-env-2"}, + wantHost: "from-env-2", + }, + } + defineTests(testCases) + }) + }) +}) + +func setConfigs(tc testCase, dir string) { + // Set kubeconfig flag value + if len(tc.kubeconfigFlag) > 0 { + kubeconfig = filepath.Join(dir, tc.kubeconfigFlag) + } + + // Set KUBECONFIG env value + if len(tc.kubeconfigEnv) > 0 { + kubeconfigEnvPaths := []string{} + for _, k := range tc.kubeconfigEnv { + kubeconfigEnvPaths = append(kubeconfigEnvPaths, filepath.Join(dir, k)) + } + os.Setenv(clientcmd.RecommendedConfigPathEnvVar, strings.Join(kubeconfigEnvPaths, ":")) + } +} + +func createFiles(files map[string]string, dir string) error { + for path, data := range files { + if err := os.WriteFile(filepath.Join(dir, path), []byte(data), 0644); err != nil { + return err + } + } + return nil +} + +func genKubeconfig(contexts ...string) string { + var sb strings.Builder + sb.WriteString(`--- +apiVersion: v1 +kind: Config +clusters: +`) + for _, ctx := range contexts { + sb.WriteString(`- cluster: + server: ` + ctx + ` + name: ` + ctx + ` +`) + } + sb.WriteString("contexts:\n") + for _, ctx := range contexts { + sb.WriteString(`- context: + cluster: ` + ctx + ` + user: ` + ctx + ` + name: ` + ctx + ` +`) + } + + sb.WriteString("users:\n") + for _, ctx := range contexts { + sb.WriteString(`- name: ` + ctx + ` +`) + } + sb.WriteString("preferences: {}\n") + if len(contexts) > 0 { + sb.WriteString("current-context: " + contexts[0] + "\n") + } + + return sb.String() +} diff --git a/pkg/client/doc.go b/pkg/client/doc.go index 6c13af211f..b2e2024942 100644 --- a/pkg/client/doc.go +++ b/pkg/client/doc.go @@ -17,7 +17,7 @@ limitations under the License. // Package client contains functionality for interacting with Kubernetes API // servers. 
// -// Clients +// # Clients // // Clients are split into two interfaces -- Readers and Writers. Readers // get and list, while writers create, update, and delete. @@ -25,18 +25,18 @@ limitations under the License. // The New function can be used to create a new client that talks directly // to the API server. // -// A common pattern in Kubernetes to read from a cache and write to the API -// server. This pattern is covered by the DelegatingClient type, which can -// be used to have a client whose Reader is different from the Writer. +// It is a common pattern in Kubernetes to read from a cache and write to the API +// server. This pattern is covered by the creating the Client with a Cache. // -// Options +// # Options // // Many client operations in Kubernetes support options. These options are // represented as variadic arguments at the end of a given method call. // For instance, to use a label selector on list, you can call -// err := someReader.List(context.Background(), &podList, client.MatchingLabels(someLabelMap)) // -// Indexing +// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"}) +// +// # Indexing // // Indexes may be added to caches using a FieldIndexer. This allows you to easily // and efficiently look up objects with certain properties. You can then make diff --git a/pkg/client/dryrun.go b/pkg/client/dryrun.go new file mode 100644 index 0000000000..fb7012200f --- /dev/null +++ b/pkg/client/dryrun.go @@ -0,0 +1,138 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// NewDryRunClient wraps an existing client and enforces DryRun mode +// on all mutating api calls. +func NewDryRunClient(c Client) Client { + return &dryRunClient{client: c} +} + +var _ Client = &dryRunClient{} + +// dryRunClient is a Client that wraps another Client in order to enforce DryRun mode. +type dryRunClient struct { + client Client +} + +// Scheme returns the scheme this client is using. +func (c *dryRunClient) Scheme() *runtime.Scheme { + return c.client.Scheme() +} + +// RESTMapper returns the rest mapper this client is using. +func (c *dryRunClient) RESTMapper() meta.RESTMapper { + return c.client.RESTMapper() +} + +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *dryRunClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.client.GroupVersionKindFor(obj) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (c *dryRunClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.client.IsObjectNamespaced(obj) +} + +// Create implements client.Client. +func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + return c.client.Create(ctx, obj, append(opts, DryRunAll)...) +} + +// Update implements client.Client. +func (c *dryRunClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return c.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Delete implements client.Client. +func (c *dryRunClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + return c.client.Delete(ctx, obj, append(opts, DryRunAll)...) +} + +// DeleteAllOf implements client.Client. 
+func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.Client. +func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) +} + +func (c *dryRunClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error { + return c.client.Apply(ctx, obj, append(opts, DryRunAll)...) +} + +// Get implements client.Client. +func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + return c.client.Get(ctx, key, obj, opts...) +} + +// List implements client.Client. +func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + return c.client.List(ctx, obj, opts...) +} + +// Status implements client.StatusClient. +func (c *dryRunClient) Status() SubResourceWriter { + return c.SubResource("status") +} + +// SubResource implements client.SubResourceClient. +func (c *dryRunClient) SubResource(subResource string) SubResourceClient { + return &dryRunSubResourceClient{client: c.client.SubResource(subResource)} +} + +// ensure dryRunSubResourceWriter implements client.SubResourceWriter. +var _ SubResourceWriter = &dryRunSubResourceClient{} + +// dryRunSubResourceClient is client.SubResourceWriter that writes status subresource with dryRun mode +// enforced. +type dryRunSubResourceClient struct { + client SubResourceClient +} + +func (sw *dryRunSubResourceClient) Get(ctx context.Context, obj, subResource Object, opts ...SubResourceGetOption) error { + return sw.client.Get(ctx, obj, subResource, opts...) +} + +func (sw *dryRunSubResourceClient) Create(ctx context.Context, obj, subResource Object, opts ...SubResourceCreateOption) error { + return sw.client.Create(ctx, obj, subResource, append(opts, DryRunAll)...) 
+} + +// Update implements client.SubResourceWriter. +func (sw *dryRunSubResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { + return sw.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.SubResourceWriter. +func (sw *dryRunSubResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { + return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) +} + +func (sw *dryRunSubResourceClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...SubResourceApplyOption) error { + return sw.client.Apply(ctx, obj, append(opts, DryRunAll)...) +} diff --git a/pkg/client/dryrun_test.go b/pkg/client/dryrun_test.go new file mode 100644 index 0000000000..35a9b63869 --- /dev/null +++ b/pkg/client/dryrun_test.go @@ -0,0 +1,296 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client_test + +import ( + "fmt" + "sync/atomic" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + appsv1applyconfigurations "k8s.io/client-go/applyconfigurations/apps/v1" + "k8s.io/utils/ptr" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("DryRunClient", func() { + var dep *appsv1.Deployment + var count uint64 = 0 + var replicaCount int32 = 2 + var ns = "default" + + getClient := func() client.Client { + cl, err := client.New(cfg, client.Options{DryRun: ptr.To(true)}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + return cl + } + + BeforeEach(func(ctx SpecContext) { + atomic.AddUint64(&count, 1) + dep = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("dry-run-deployment-%v", count), + Namespace: ns, + Labels: map[string]string{"name": fmt.Sprintf("dry-run-deployment-%v", count)}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + + var err error + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func(ctx SpecContext) { + deleteDeployment(ctx, dep, ns) + }) + + It("should successfully Get an object", func(ctx SpecContext) { + name := types.NamespacedName{Namespace: ns, Name: dep.Name} + result := &appsv1.Deployment{} + + Expect(getClient().Get(ctx, name, result)).NotTo(HaveOccurred()) + Expect(result).To(BeEquivalentTo(dep)) + }) + + It("should successfully List objects", func(ctx SpecContext) { + result := &appsv1.DeploymentList{} + opts := 
client.MatchingLabels(dep.Labels) + + Expect(getClient().List(ctx, result, opts)).NotTo(HaveOccurred()) + + Expect(len(result.Items)).To(BeEquivalentTo(1)) + Expect(result.Items[0]).To(BeEquivalentTo(*dep)) + }) + + It("should not create an object", func(ctx SpecContext) { + newDep := dep.DeepCopy() + newDep.Name = "new-deployment" + + Expect(getClient().Create(ctx, newDep)).ToNot(HaveOccurred()) + + _, err := clientset.AppsV1().Deployments(ns).Get(ctx, newDep.Name, metav1.GetOptions{}) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should not create an object with opts", func(ctx SpecContext) { + newDep := dep.DeepCopy() + newDep.Name = "new-deployment" + opts := &client.CreateOptions{DryRun: []string{"Bye", "Pippa"}} + + Expect(getClient().Create(ctx, newDep, opts)).ToNot(HaveOccurred()) + + _, err := clientset.AppsV1().Deployments(ns).Get(ctx, newDep.Name, metav1.GetOptions{}) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should refuse a create request for an invalid object", func(ctx SpecContext) { + changedDep := dep.DeepCopy() + changedDep.Spec.Template.Spec.Containers = nil + + err := getClient().Create(ctx, changedDep) + Expect(apierrors.IsInvalid(err)).To(BeTrue()) + }) + + It("should not change objects via update", func(ctx SpecContext) { + changedDep := dep.DeepCopy() + *changedDep.Spec.Replicas = 2 + + Expect(getClient().Update(ctx, changedDep)).ToNot(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not change objects via update with opts", func(ctx SpecContext) { + changedDep := dep.DeepCopy() + *changedDep.Spec.Replicas = 2 + opts := &client.UpdateOptions{DryRun: []string{"Bye", "Pippa"}} + + Expect(getClient().Update(ctx, changedDep, opts)).ToNot(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, 
metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should refuse an update request for an invalid change", func(ctx SpecContext) { + changedDep := dep.DeepCopy() + changedDep.Spec.Template.Spec.Containers = nil + + err := getClient().Update(ctx, changedDep) + Expect(apierrors.IsInvalid(err)).To(BeTrue()) + }) + + It("should not change objects via patch", func(ctx SpecContext) { + changedDep := dep.DeepCopy() + *changedDep.Spec.Replicas = 2 + + Expect(getClient().Patch(ctx, changedDep, client.MergeFrom(dep))).ToNot(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not change objects via patch with opts", func(ctx SpecContext) { + changedDep := dep.DeepCopy() + *changedDep.Spec.Replicas = 2 + opts := &client.PatchOptions{DryRun: []string{"Bye", "Pippa"}} + + Expect(getClient().Patch(ctx, changedDep, client.MergeFrom(dep), opts)).ToNot(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not delete objects", func(ctx SpecContext) { + Expect(getClient().Delete(ctx, dep)).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not delete objects with opts", func(ctx SpecContext) { + opts := &client.DeleteOptions{DryRun: []string{"Bye", "Pippa"}} + + Expect(getClient().Delete(ctx, dep, opts)).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + 
Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not delete objects via deleteAllOf", func(ctx SpecContext) { + opts := []client.DeleteAllOfOption{client.InNamespace(ns), client.MatchingLabels(dep.Labels)} + + Expect(getClient().DeleteAllOf(ctx, dep, opts...)).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not change objects via update status", func(ctx SpecContext) { + changedDep := dep.DeepCopy() + changedDep.Status.Replicas = 99 + + Expect(getClient().Status().Update(ctx, changedDep)).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not change objects via update status with opts", func(ctx SpecContext) { + changedDep := dep.DeepCopy() + changedDep.Status.Replicas = 99 + opts := &client.SubResourceUpdateOptions{UpdateOptions: client.UpdateOptions{DryRun: []string{"Bye", "Pippa"}}} + + Expect(getClient().Status().Update(ctx, changedDep, opts)).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not change objects via status patch", func(ctx SpecContext) { + changedDep := dep.DeepCopy() + changedDep.Status.Replicas = 99 + + Expect(getClient().Status().Patch(ctx, changedDep, client.MergeFrom(dep))).ToNot(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + 
Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not change objects via status patch with opts", func(ctx SpecContext) { + changedDep := dep.DeepCopy() + changedDep.Status.Replicas = 99 + + opts := &client.SubResourcePatchOptions{PatchOptions: client.PatchOptions{DryRun: []string{"Bye", "Pippa"}}} + + Expect(getClient().Status().Patch(ctx, changedDep, client.MergeFrom(dep), opts)).ToNot(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not change objects via status apply", func(ctx SpecContext) { + deploymentAC, err := appsv1applyconfigurations.ExtractDeployment(dep, "test-owner") + Expect(err).NotTo(HaveOccurred()) + deploymentAC.WithStatus(&appsv1applyconfigurations.DeploymentStatusApplyConfiguration{ + Replicas: ptr.To(int32(99)), + }) + + Expect(getClient().Status().Apply(ctx, deploymentAC, client.FieldOwner("test-owner"))).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual).To(BeEquivalentTo(dep)) + }) + + It("should not change objects via status apply with opts", func(ctx SpecContext) { + deploymentAC, err := appsv1applyconfigurations.ExtractDeployment(dep, "test-owner") + Expect(err).NotTo(HaveOccurred()) + deploymentAC.WithStatus(&appsv1applyconfigurations.DeploymentStatusApplyConfiguration{ + Replicas: ptr.To(int32(99)), + }) + + opts := &client.SubResourceApplyOptions{ApplyOptions: client.ApplyOptions{DryRun: []string{"Bye", "Pippa"}}} + + Expect(getClient().Status().Apply(ctx, deploymentAC, client.FieldOwner("test-owner"), opts)).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + 
Expect(actual).To(BeEquivalentTo(dep)) + }) +}) diff --git a/pkg/client/example_test.go b/pkg/client/example_test.go index 9e29f5b3ff..390dc10143 100644 --- a/pkg/client/example_test.go +++ b/pkg/client/example_test.go @@ -20,14 +20,20 @@ import ( "context" "fmt" "os" + "time" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + corev1ac "k8s.io/client-go/applyconfigurations/core/v1" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) var ( @@ -51,7 +57,27 @@ func ExampleNew() { } } -// This example shows how to use the client with typed and unstructured objects to retrieve a objects. +func ExampleNew_suppress_warnings() { + cfg := config.GetConfigOrDie() + // Use a rest.WarningHandlerWithContext that discards warning messages. + cfg.WarningHandlerWithContext = rest.NoWarnings{} + + cl, err := client.New(cfg, client.Options{}) + if err != nil { + fmt.Println("failed to create client") + os.Exit(1) + } + + podList := &corev1.PodList{} + + err = cl.List(context.Background(), podList, client.InNamespace("default")) + if err != nil { + fmt.Printf("failed to list pods in namespace default: %v\n", err) + os.Exit(1) + } +} + +// This example shows how to use the client with typed and unstructured objects to retrieve an object. func ExampleClient_get() { // Using a typed object. pod := &corev1.Pod{} @@ -74,7 +100,7 @@ func ExampleClient_get() { }, u) } -// This example shows how to use the client with typed and unstrucurted objects to create objects. +// This example shows how to use the client with typed and unstructured objects to create objects. func ExampleClient_create() { // Using a typed object. 
pod := &corev1.Pod{ @@ -84,7 +110,7 @@ func ExampleClient_create() { }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ - corev1.Container{ + { Image: "nginx", Name: "nginx", }, @@ -97,8 +123,10 @@ func ExampleClient_create() { // Using a unstructured object. u := &unstructured.Unstructured{} u.Object = map[string]interface{}{ - "name": "name", - "namespace": "namespace", + "metadata": map[string]interface{}{ + "name": "name", + "namespace": "namespace", + }, "spec": map[string]interface{}{ "replicas": 2, "selector": map[string]interface{}{ @@ -129,7 +157,7 @@ func ExampleClient_create() { _ = c.Create(context.Background(), u) } -// This example shows how to use the client with typed and unstrucurted objects to list objects. +// This example shows how to use the client with typed and unstructured objects to list objects. func ExampleClient_list() { // Using a typed object. pod := &corev1.PodList{} @@ -146,7 +174,7 @@ func ExampleClient_list() { _ = c.List(context.Background(), u) } -// This example shows how to use the client with typed and unstrucurted objects to update objects. +// This example shows how to use the client with typed and unstructured objects to update objects. func ExampleClient_update() { // Using a typed object. pod := &corev1.Pod{} @@ -155,7 +183,7 @@ func ExampleClient_update() { Namespace: "namespace", Name: "name", }, pod) - pod.SetFinalizers(append(pod.GetFinalizers(), "new-finalizer")) + controllerutil.AddFinalizer(pod, "new-finalizer") _ = c.Update(context.Background(), pod) // Using a unstructured object. @@ -169,11 +197,52 @@ func ExampleClient_update() { Namespace: "namespace", Name: "name", }, u) - u.SetFinalizers(append(u.GetFinalizers(), "new-finalizer")) + controllerutil.AddFinalizer(u, "new-finalizer") _ = c.Update(context.Background(), u) } -// This example shows how to use the client with typed and unstrucurted objects to delete objects. 
+// This example shows how to use the client with typed and unstructured objects to patch objects. +func ExampleClient_patch() { + patch := []byte(`{"metadata":{"annotations":{"version": "v2"}}}`) + _ = c.Patch(context.Background(), &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "name", + }, + }, client.RawPatch(types.StrategicMergePatchType, patch)) +} + +// This example shows how to use the client with unstructured objects to create/patch objects using Server Side Apply, +// "k8s.io/apimachinery/pkg/runtime".DefaultUnstructuredConverter.ToUnstructured is used to convert an object into map[string]any representation, +// which is then set as an "Object" field in *unstructured.Unstructured struct, which implements client.Object. +func ExampleClient_apply() { + // Using a typed object. + configMap := corev1ac.ConfigMap("name", "namespace").WithData(map[string]string{"key": "value"}) + // c is a created client. + u := &unstructured.Unstructured{} + u.Object, _ = runtime.DefaultUnstructuredConverter.ToUnstructured(configMap) + _ = c.Patch(context.Background(), u, client.Apply, client.ForceOwnership, client.FieldOwner("field-owner")) +} + +// This example shows how to use the client with typed and unstructured objects to patch objects' status. +func ExampleClient_patchStatus() { + u := &unstructured.Unstructured{} + u.Object = map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "foo", + "namespace": "namespace", + }, + } + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "batch", + Version: "v1beta1", + Kind: "CronJob", + }) + patch := []byte(fmt.Sprintf(`{"status":{"lastScheduleTime":"%s"}}`, time.Now().Format(time.RFC3339))) + _ = c.Status().Patch(context.Background(), u, client.RawPatch(types.MergePatchType, patch)) +} + +// This example shows how to use the client with typed and unstructured objects to delete objects. func ExampleClient_delete() { // Using a typed object. 
pod := &corev1.Pod{ @@ -187,10 +256,8 @@ func ExampleClient_delete() { // Using a unstructured object. u := &unstructured.Unstructured{} - u.Object = map[string]interface{}{ - "name": "name", - "namespace": "namespace", - } + u.SetName("name") + u.SetNamespace("namespace") u.SetGroupVersionKind(schema.GroupVersionKind{ Group: "apps", Kind: "Deployment", @@ -199,10 +266,26 @@ func ExampleClient_delete() { _ = c.Delete(context.Background(), u) } +// This example shows how to use the client with typed and unstructured objects to delete collections of objects. +func ExampleClient_deleteAllOf() { + // Using a typed object. + // c is a created client. + _ = c.DeleteAllOf(context.Background(), &corev1.Pod{}, client.InNamespace("foo"), client.MatchingLabels{"app": "foo"}) + + // Using an unstructured Object + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + _ = c.DeleteAllOf(context.Background(), u, client.InNamespace("foo"), client.MatchingLabels{"app": "foo"}) +} + // This example shows how to set up and consume a field selector over a pod's volumes' secretName field. -func ExampleFieldIndexer_secretName() { +func ExampleFieldIndexer_secretNameNode() { // someIndexer is a FieldIndexer over a Cache - _ = someIndexer.IndexField(&corev1.Pod{}, "spec.volumes.secret.secretName", func(o runtime.Object) []string { + _ = someIndexer.IndexField(context.TODO(), &corev1.Pod{}, "spec.volumes.secret.secretName", func(o client.Object) []string { var res []string for _, vol := range o.(*corev1.Pod).Spec.Volumes { if vol.Secret == nil { @@ -214,8 +297,20 @@ func ExampleFieldIndexer_secretName() { return res }) + _ = someIndexer.IndexField(context.TODO(), &corev1.Pod{}, "spec.NodeName", func(o client.Object) []string { + nodeName := o.(*corev1.Pod).Spec.NodeName + if nodeName != "" { + return []string{nodeName} + } + return nil + }) + // elsewhere (e.g. 
in your reconciler) mySecretName := "someSecret" // derived from the reconcile.Request, for instance + myNode := "master-0" var podsWithSecrets corev1.PodList - _ = c.List(context.Background(), &podsWithSecrets, client.MatchingField("spec.volumes.secret.secretName", mySecretName)) + _ = c.List(context.Background(), &podsWithSecrets, client.MatchingFields{ + "spec.volumes.secret.secretName": mySecretName, + "spec.NodeName": myNode, + }) } diff --git a/pkg/client/fake/client.go b/pkg/client/fake/client.go index 7a348a5867..62067cb19c 100644 --- a/pkg/client/fake/client.go +++ b/pkg/client/fake/client.go @@ -18,83 +18,460 @@ package fake import ( "context" - "encoding/json" + "errors" "fmt" + "reflect" "strings" + "sync" + "time" + /* + Stick with gopkg.in/evanphx/json-patch.v4 here to match + upstream Kubernetes code and avoid breaking changes introduced in v5. + - Kubernetes itself remains on json-patch v4 to avoid compatibility issues + tied to v5’s stricter RFC6902 compliance. + - The fake client code is adapted from client-go’s testing fixture, which also + relies on json-patch v4. 
+ See: + https://github.com/kubernetes/kubernetes/pull/91622 (discussion of why K8s + stays on v4) + https://github.com/kubernetes/kubernetes/pull/120326 (v5.6.0+incompatible + missing a critical fix) + */ + + jsonpatch "gopkg.in/evanphx/json-patch.v4" + appsv1 "k8s.io/api/apps/v1" + authenticationv1 "k8s.io/api/authentication/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/managedfields" + utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/watch" + clientgoapplyconfigurations "k8s.io/client-go/applyconfigurations" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/testing" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" + "sigs.k8s.io/controller-runtime/pkg/internal/field/selector" "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" ) type fakeClient struct { - tracker testing.ObjectTracker - scheme *runtime.Scheme + // trackerWriteLock must be acquired before writing to + // the tracker or performing reads that affect a following + // write. 
+ trackerWriteLock sync.Mutex + tracker versionedTracker + + schemeLock sync.RWMutex + scheme *runtime.Scheme + + restMapper meta.RESTMapper + withStatusSubresource sets.Set[schema.GroupVersionKind] + + // indexes maps each GroupVersionKind (GVK) to the indexes registered for that GVK. + // The inner map maps from index name to IndexerFunc. + indexes map[schema.GroupVersionKind]map[string]client.IndexerFunc + // indexesLock must be held when accessing indexes. + indexesLock sync.RWMutex + + returnManagedFields bool } -var _ client.Client = &fakeClient{} +var _ client.WithWatch = &fakeClient{} + +const ( + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength + + subResourceScale = "scale" +) // NewFakeClient creates a new fake client for testing. // You can choose to initialize it with a slice of runtime.Object. -// Deprecated: use NewFakeClientWithScheme. You should always be -// passing an explicit Scheme. -func NewFakeClient(initObjs ...runtime.Object) client.Client { - return NewFakeClientWithScheme(scheme.Scheme, initObjs...) +func NewFakeClient(initObjs ...runtime.Object) client.WithWatch { + return NewClientBuilder().WithRuntimeObjects(initObjs...).Build() } -// NewFakeClientWithScheme creates a new fake client with the given scheme -// for testing. -// You can choose to initialize it with a slice of runtime.Object. -func NewFakeClientWithScheme(clientScheme *runtime.Scheme, initObjs ...runtime.Object) client.Client { - tracker := testing.NewObjectTracker(clientScheme, scheme.Codecs.UniversalDecoder()) - for _, obj := range initObjs { - err := tracker.Add(obj) +// NewClientBuilder returns a new builder to create a fake client. +func NewClientBuilder() *ClientBuilder { + return &ClientBuilder{} +} + +// ClientBuilder builds a fake client. 
+type ClientBuilder struct { + scheme *runtime.Scheme + restMapper meta.RESTMapper + initObject []client.Object + initLists []client.ObjectList + initRuntimeObjects []runtime.Object + withStatusSubresource []client.Object + objectTracker testing.ObjectTracker + interceptorFuncs *interceptor.Funcs + typeConverters []managedfields.TypeConverter + returnManagedFields bool + isBuilt bool + + // indexes maps each GroupVersionKind (GVK) to the indexes registered for that GVK. + // The inner map maps from index name to IndexerFunc. + indexes map[schema.GroupVersionKind]map[string]client.IndexerFunc +} + +// WithScheme sets this builder's internal scheme. +// If not set, defaults to client-go's global scheme.Scheme. +func (f *ClientBuilder) WithScheme(scheme *runtime.Scheme) *ClientBuilder { + f.scheme = scheme + return f +} + +// WithRESTMapper sets this builder's restMapper. +// The restMapper is directly set as mapper in the Client. This can be used for example +// with a meta.DefaultRESTMapper to provide a static rest mapping. +// If not set, defaults to an empty meta.DefaultRESTMapper. +func (f *ClientBuilder) WithRESTMapper(restMapper meta.RESTMapper) *ClientBuilder { + f.restMapper = restMapper + return f +} + +// WithObjects can be optionally used to initialize this fake client with client.Object(s). +func (f *ClientBuilder) WithObjects(initObjs ...client.Object) *ClientBuilder { + f.initObject = append(f.initObject, initObjs...) + return f +} + +// WithLists can be optionally used to initialize this fake client with client.ObjectList(s). +func (f *ClientBuilder) WithLists(initLists ...client.ObjectList) *ClientBuilder { + f.initLists = append(f.initLists, initLists...) + return f +} + +// WithRuntimeObjects can be optionally used to initialize this fake client with runtime.Object(s). +func (f *ClientBuilder) WithRuntimeObjects(initRuntimeObjs ...runtime.Object) *ClientBuilder { + f.initRuntimeObjects = append(f.initRuntimeObjects, initRuntimeObjs...) 
+ return f +} + +// WithObjectTracker can be optionally used to initialize this fake client with testing.ObjectTracker. +// Setting this is incompatible with setting WithTypeConverters, as they are a setting on the +// tracker. +func (f *ClientBuilder) WithObjectTracker(ot testing.ObjectTracker) *ClientBuilder { + f.objectTracker = ot + return f +} + +// WithIndex can be optionally used to register an index with name `field` and indexer `extractValue` +// for API objects of the same GroupVersionKind (GVK) as `obj` in the fake client. +// It can be invoked multiple times, both with objects of the same GVK or different ones. +// Invoking WithIndex twice with the same `field` and GVK (via `obj`) arguments will panic. +// WithIndex retrieves the GVK of `obj` using the scheme registered via WithScheme if +// WithScheme was previously invoked, the default scheme otherwise. +func (f *ClientBuilder) WithIndex(obj runtime.Object, field string, extractValue client.IndexerFunc) *ClientBuilder { + objScheme := f.scheme + if objScheme == nil { + objScheme = scheme.Scheme + } + + gvk, err := apiutil.GVKForObject(obj, objScheme) + if err != nil { + panic(err) + } + + // If this is the first index being registered, we initialize the map storing all the indexes. + if f.indexes == nil { + f.indexes = make(map[schema.GroupVersionKind]map[string]client.IndexerFunc) + } + + // If this is the first index being registered for the GroupVersionKind of `obj`, we initialize + // the map storing the indexes for that GroupVersionKind. 
+ if f.indexes[gvk] == nil { + f.indexes[gvk] = make(map[string]client.IndexerFunc) + } + + if _, fieldAlreadyIndexed := f.indexes[gvk][field]; fieldAlreadyIndexed { + panic(fmt.Errorf("indexer conflict: field %s for GroupVersionKind %v is already indexed", + field, gvk)) + } + + f.indexes[gvk][field] = extractValue + + return f +} + +// WithStatusSubresource configures the passed object with a status subresource, which means +// calls to Update and Patch will not alter its status. +func (f *ClientBuilder) WithStatusSubresource(o ...client.Object) *ClientBuilder { + f.withStatusSubresource = append(f.withStatusSubresource, o...) + return f +} + +// WithInterceptorFuncs configures the client methods to be intercepted using the provided interceptor.Funcs. +func (f *ClientBuilder) WithInterceptorFuncs(interceptorFuncs interceptor.Funcs) *ClientBuilder { + f.interceptorFuncs = &interceptorFuncs + return f +} + +// WithTypeConverters sets the type converters for the fake client. The list is ordered and the first +// non-erroring converter is used. A type converter must be provided for all types the client is used +// for, otherwise it will error. +// +// This setting is incompatible with WithObjectTracker, as the type converters are a setting on the tracker. +// +// If unset, this defaults to: +// * clientgoapplyconfigurations.NewTypeConverter(scheme.Scheme), +// * managedfields.NewDeducedTypeConverter(), +// +// Be aware that the behavior of the `NewDeducedTypeConverter` might not match the behavior of the +// Kubernetes APIServer, it is recommended to provide a type converter for your types. TypeConverters +// are generated along with ApplyConfigurations. +func (f *ClientBuilder) WithTypeConverters(typeConverters ...managedfields.TypeConverter) *ClientBuilder { + f.typeConverters = append(f.typeConverters, typeConverters...) + return f +} + +// WithReturnManagedFields configures the fake client to return managedFields +// on objects. 
+func (f *ClientBuilder) WithReturnManagedFields() *ClientBuilder { + f.returnManagedFields = true + return f +} + +// Build builds and returns a new fake client. +func (f *ClientBuilder) Build() client.WithWatch { + if f.isBuilt { + panic("Build() must not be called multiple times when creating a ClientBuilder") + } + if f.scheme == nil { + f.scheme = scheme.Scheme + } + if f.restMapper == nil { + f.restMapper = meta.NewDefaultRESTMapper([]schema.GroupVersion{}) + } + + withStatusSubResource := sets.New(inTreeResourcesWithStatus()...) + for _, o := range f.withStatusSubresource { + gvk, err := apiutil.GVKForObject(o, f.scheme) if err != nil { - panic(fmt.Errorf("failed to add object %v to fake client: %v", obj, err)) + panic(fmt.Errorf("failed to get gvk for object %T: %w", withStatusSubResource, err)) + } + withStatusSubResource.Insert(gvk) + } + + if f.objectTracker != nil && len(f.typeConverters) > 0 { + panic(errors.New("WithObjectTracker and WithTypeConverters are incompatible")) + } + + var usesFieldManagedObjectTracker bool + if f.objectTracker == nil { + if len(f.typeConverters) == 0 { + // Use corresponding scheme to ensure the converter error + // for types it can't handle. 
+ clientGoScheme := runtime.NewScheme() + if err := scheme.AddToScheme(clientGoScheme); err != nil { + panic(fmt.Sprintf("failed to construct client-go scheme: %v", err)) + } + f.typeConverters = []managedfields.TypeConverter{ + clientgoapplyconfigurations.NewTypeConverter(clientGoScheme), + managedfields.NewDeducedTypeConverter(), + } + } + f.objectTracker = testing.NewFieldManagedObjectTracker( + f.scheme, + serializer.NewCodecFactory(f.scheme).UniversalDecoder(), + multiTypeConverter{upstream: f.typeConverters}, + ) + usesFieldManagedObjectTracker = true + } + tracker := versionedTracker{ + upstream: f.objectTracker, + scheme: f.scheme, + withStatusSubresource: withStatusSubResource, + usesFieldManagedObjectTracker: usesFieldManagedObjectTracker, + } + + for _, obj := range f.initObject { + if err := tracker.Add(obj); err != nil { + panic(fmt.Errorf("failed to add object %v to fake client: %w", obj, err)) + } + } + for _, obj := range f.initLists { + if err := tracker.Add(obj); err != nil { + panic(fmt.Errorf("failed to add list %v to fake client: %w", obj, err)) + } + } + for _, obj := range f.initRuntimeObjects { + if err := tracker.Add(obj); err != nil { + panic(fmt.Errorf("failed to add runtime object %v to fake client: %w", obj, err)) } } - return &fakeClient{ - tracker: tracker, - scheme: clientScheme, + + var result client.WithWatch = &fakeClient{ + tracker: tracker, + scheme: f.scheme, + restMapper: f.restMapper, + indexes: f.indexes, + withStatusSubresource: withStatusSubResource, + returnManagedFields: f.returnManagedFields, + } + + if f.interceptorFuncs != nil { + result = interceptor.NewClient(result, *f.interceptorFuncs) + } + + f.isBuilt = true + return result +} + +const trackerAddResourceVersion = "999" + +// convertFromUnstructuredIfNecessary will convert runtime.Unstructured for a GVK that is recognized +// by the schema into the whatever the schema produces with New() for said GVK. 
+// This is required because the tracker unconditionally saves on manipulations, but its List() implementation +// tries to assign whatever it finds into a ListType it gets from schema.New() - Thus we have to ensure +// we save as the very same type, otherwise subsequent List requests will fail. +func convertFromUnstructuredIfNecessary(s *runtime.Scheme, o runtime.Object) (runtime.Object, error) { + u, isUnstructured := o.(runtime.Unstructured) + if !isUnstructured { + return o, nil + } + gvk := o.GetObjectKind().GroupVersionKind() + if !s.Recognizes(gvk) { + return o, nil + } + + typed, err := s.New(gvk) + if err != nil { + return nil, fmt.Errorf("scheme recognizes %s but failed to produce an object for it: %w", gvk, err) + } + if _, isTypedUnstructured := typed.(runtime.Unstructured); isTypedUnstructured { + return o, nil + } + + unstructuredSerialized, err := json.Marshal(u) + if err != nil { + return nil, fmt.Errorf("failed to serialize %T: %w", unstructuredSerialized, err) + } + if err := json.Unmarshal(unstructuredSerialized, typed); err != nil { + return nil, fmt.Errorf("failed to unmarshal the content of %T into %T: %w", u, typed, err) + } + + return typed, nil } -func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error { +func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(obj); err != nil { + return err + } + + c.schemeLock.RLock() + defer c.schemeLock.RUnlock() gvr, err := getGVRFromObject(obj, c.scheme) if err != nil { return err } + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } o, err := c.tracker.Get(gvr, key.Namespace, key.Name) if err != nil { return err } + + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + + // If the final object is unstructured, the json + // representation must contain GVK or the apimachinery + // json 
serializer will error out. + ta.SetAPIVersion(gvk.GroupVersion().String()) + ta.SetKind(gvk.Kind) + j, err := json.Marshal(o) if err != nil { return err } - decoder := scheme.Codecs.UniversalDecoder() - _, _, err = decoder.Decode(j, nil, obj) - return err + zero(obj) + if err := json.Unmarshal(j, obj); err != nil { + return err + } + + if !c.returnManagedFields { + obj.SetManagedFields(nil) + } + + return ensureTypeMeta(obj, gvk) +} + +func (c *fakeClient) Watch(ctx context.Context, list client.ObjectList, opts ...client.ListOption) (watch.Interface, error) { + if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(list); err != nil { + return nil, err + } + + c.schemeLock.RLock() + defer c.schemeLock.RUnlock() + + gvk, err := apiutil.GVKForObject(list, c.scheme) + if err != nil { + return nil, err + } + + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + return c.tracker.Watch(gvr, listOpts.Namespace) } -func (c *fakeClient) List(ctx context.Context, obj runtime.Object, opts ...client.ListOptionFunc) error { +func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(obj); err != nil { + return err + } + + c.schemeLock.RLock() + defer c.schemeLock.RUnlock() gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return err } - if !strings.HasSuffix(gvk.Kind, "List") { - return fmt.Errorf("non-list type %T (kind %q) passed as output", obj, gvk) + originalGVK := gvk + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + listGVK := gvk + listGVK.Kind += "List" + + if _, isUnstructuredList := obj.(runtime.Unstructured); isUnstructuredList && !c.scheme.Recognizes(listGVK) { + // We need to register the ListKind with UnstructuredList: + // 
https://github.com/kubernetes/kubernetes/blob/7b2776b89fb1be28d4e9203bdeec079be903c103/staging/src/k8s.io/client-go/dynamic/fake/simple.go#L44-L51 + c.schemeLock.RUnlock() + c.schemeLock.Lock() + c.scheme.AddKnownTypeWithName(gvk.GroupVersion().WithKind(gvk.Kind+"List"), &unstructured.UnstructuredList{}) + c.schemeLock.Unlock() + c.schemeLock.RLock() } - // we need the non-list GVK, so chop off the "List" from the end of the kind - gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) @@ -104,34 +481,148 @@ func (c *fakeClient) List(ctx context.Context, obj runtime.Object, opts ...clien if err != nil { return err } + j, err := json.Marshal(o) if err != nil { return err } - decoder := scheme.Codecs.UniversalDecoder() - _, _, err = decoder.Decode(j, nil, obj) + zero(obj) + if err := ensureTypeMeta(obj, originalGVK); err != nil { + return err + } + objCopy := obj.DeepCopyObject().(client.ObjectList) + if err := json.Unmarshal(j, objCopy); err != nil { + return err + } + + objs, err := meta.ExtractList(objCopy) if err != nil { return err } - if listOpts.LabelSelector != nil { - objs, err := meta.ExtractList(obj) - if err != nil { + for _, o := range objs { + if err := ensureTypeMeta(o, gvk); err != nil { return err } - filteredObjs, err := objectutil.FilterWithLabels(objs, listOpts.LabelSelector) + + if !c.returnManagedFields { + o.(metav1.Object).SetManagedFields(nil) + } + } + + if listOpts.LabelSelector == nil && listOpts.FieldSelector == nil { + return meta.SetList(obj, objs) + } + + // If we're here, either a label or field selector are specified (or both), so before we return + // the list we must filter it. If both selectors are set, they are ANDed. 
+ filteredList, err := c.filterList(objs, gvk, listOpts.LabelSelector, listOpts.FieldSelector) + if err != nil { + return err + } + + return meta.SetList(obj, filteredList) +} + +func (c *fakeClient) filterList(list []runtime.Object, gvk schema.GroupVersionKind, ls labels.Selector, fs fields.Selector) ([]runtime.Object, error) { + // Filter the objects with the label selector + filteredList := list + if ls != nil { + objsFilteredByLabel, err := objectutil.FilterWithLabels(list, ls) if err != nil { - return err + return nil, err } - err = meta.SetList(obj, filteredObjs) + filteredList = objsFilteredByLabel + } + + // Filter the result of the previous pass with the field selector + if fs != nil { + objsFilteredByField, err := c.filterWithFields(filteredList, gvk, fs) if err != nil { - return err + return nil, err } + filteredList = objsFilteredByField } - return nil + + return filteredList, nil +} + +func (c *fakeClient) filterWithFields(list []runtime.Object, gvk schema.GroupVersionKind, fs fields.Selector) ([]runtime.Object, error) { + requiresExact := selector.RequiresExactMatch(fs) + if !requiresExact { + return nil, fmt.Errorf(`field selector %s is not in one of the two supported forms "key==val" or "key=val"`, fs) + } + + c.indexesLock.RLock() + defer c.indexesLock.RUnlock() + // Field selection is mimicked via indexes, so there's no sane answer this function can give + // if there are no indexes registered for the GroupVersionKind of the objects in the list. 
+ indexes := c.indexes[gvk] + for _, req := range fs.Requirements() { + if len(indexes) == 0 || indexes[req.Field] == nil { + return nil, fmt.Errorf("List on GroupVersionKind %v specifies selector on field %s, but no "+ + "index with name %s has been registered for GroupVersionKind %v", gvk, req.Field, req.Field, gvk) + } + } + + filteredList := make([]runtime.Object, 0, len(list)) + for _, obj := range list { + matches := true + for _, req := range fs.Requirements() { + indexExtractor := indexes[req.Field] + if !c.objMatchesFieldSelector(obj, indexExtractor, req.Value) { + matches = false + break + } + } + if matches { + filteredList = append(filteredList, obj) + } + } + return filteredList, nil +} + +func (c *fakeClient) objMatchesFieldSelector(o runtime.Object, extractIndex client.IndexerFunc, val string) bool { + obj, isClientObject := o.(client.Object) + if !isClientObject { + panic(fmt.Errorf("expected object %v to be of type client.Object, but it's not", o)) + } + + for _, extractedVal := range extractIndex(obj) { + if extractedVal == val { + return true + } + } + + return false } -func (c *fakeClient) Create(ctx context.Context, obj runtime.Object, opts ...client.CreateOptionFunc) error { +func (c *fakeClient) Scheme() *runtime.Scheme { + return c.scheme +} + +func (c *fakeClient) RESTMapper() meta.RESTMapper { + return c.restMapper +} + +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *fakeClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return apiutil.GVKForObject(obj, c.scheme) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. 
+func (c *fakeClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return apiutil.IsObjectNamespaced(obj, c.scheme, c.restMapper) +} + +func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(obj); err != nil { + return err + } + + c.schemeLock.RLock() + defer c.schemeLock.RUnlock() + createOptions := &client.CreateOptions{} createOptions.ApplyOptions(opts) @@ -149,32 +640,48 @@ func (c *fakeClient) Create(ctx context.Context, obj runtime.Object, opts ...cli if err != nil { return err } - return c.tracker.Create(gvr, obj, accessor.GetNamespace()) -} -func (c *fakeClient) Delete(ctx context.Context, obj runtime.Object, opts ...client.DeleteOptionFunc) error { - gvr, err := getGVRFromObject(obj, c.scheme) + if accessor.GetName() == "" && accessor.GetGenerateName() != "" { + base := accessor.GetGenerateName() + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] + } + accessor.SetName(fmt.Sprintf("%s%s", base, utilrand.String(randomLength))) + } + // Ignore attempts to set deletion timestamp + if !accessor.GetDeletionTimestamp().IsZero() { + accessor.SetDeletionTimestamp(nil) + } + + gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return err } - accessor, err := meta.Accessor(obj) - if err != nil { + + c.trackerWriteLock.Lock() + defer c.trackerWriteLock.Unlock() + + if err := c.tracker.Create(gvr, obj, accessor.GetNamespace(), *createOptions.AsCreateOptions()); err != nil { + // The managed fields tracker sets gvk even on errors + _ = ensureTypeMeta(obj, gvk) return err } - //TODO: implement propagation - return c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) -} -func (c *fakeClient) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOptionFunc) error { - updateOptions := &client.UpdateOptions{} - updateOptions.ApplyOptions(opts) + if !c.returnManagedFields { + 
obj.SetManagedFields(nil) + } - for _, dryRunOpt := range updateOptions.DryRun { - if dryRunOpt == metav1.DryRunAll { - return nil - } + return ensureTypeMeta(obj, gvk) +} + +func (c *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(obj); err != nil { + return err } + c.schemeLock.RLock() + defer c.schemeLock.RUnlock() + gvr, err := getGVRFromObject(obj, c.scheme) if err != nil { return err @@ -183,75 +690,1040 @@ func (c *fakeClient) Update(ctx context.Context, obj runtime.Object, opts ...cli if err != nil { return err } - return c.tracker.Update(gvr, obj, accessor.GetNamespace()) -} - -func (c *fakeClient) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOptionFunc) error { - patchOptions := &client.PatchOptions{} - patchOptions.ApplyOptions(opts) + delOptions := client.DeleteOptions{} + delOptions.ApplyOptions(opts) - for _, dryRunOpt := range patchOptions.DryRun { + for _, dryRunOpt := range delOptions.DryRun { if dryRunOpt == metav1.DryRunAll { return nil } } - gvr, err := getGVRFromObject(obj, c.scheme) - if err != nil { + c.trackerWriteLock.Lock() + defer c.trackerWriteLock.Unlock() + // Check the ResourceVersion if that Precondition was specified. + if delOptions.Preconditions != nil && delOptions.Preconditions.ResourceVersion != nil { + name := accessor.GetName() + dbObj, err := c.tracker.Get(gvr, accessor.GetNamespace(), name) + if err != nil { + return err + } + oldAccessor, err := meta.Accessor(dbObj) + if err != nil { + return err + } + actualRV := oldAccessor.GetResourceVersion() + expectRV := *delOptions.Preconditions.ResourceVersion + if actualRV != expectRV { + msg := fmt.Sprintf( + "the ResourceVersion in the precondition (%s) does not match the ResourceVersion in record (%s). 
"+ + "The object might have been modified", + expectRV, actualRV) + return apierrors.NewConflict(gvr.GroupResource(), name, errors.New(msg)) + } + } + + return c.deleteObjectLocked(gvr, accessor) +} + +func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(obj); err != nil { return err } - accessor, err := meta.Accessor(obj) + + c.schemeLock.RLock() + defer c.schemeLock.RUnlock() + + gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return err } - data, err := patch.Data(obj) + + dcOptions := client.DeleteAllOfOptions{} + dcOptions.ApplyOptions(opts) + + for _, dryRunOpt := range dcOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + c.trackerWriteLock.Lock() + defer c.trackerWriteLock.Unlock() + + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + o, err := c.tracker.List(gvr, gvk, dcOptions.Namespace) if err != nil { return err } - reaction := testing.ObjectReaction(c.tracker) - handled, o, err := reaction(testing.NewPatchAction(gvr, accessor.GetNamespace(), accessor.GetName(), patch.Type(), data)) + objs, err := meta.ExtractList(o) if err != nil { return err } - if !handled { - panic("tracker could not handle patch method") - } - - j, err := json.Marshal(o) + filteredObjs, err := objectutil.FilterWithLabels(objs, dcOptions.LabelSelector) if err != nil { return err } - decoder := scheme.Codecs.UniversalDecoder() - _, _, err = decoder.Decode(j, nil, obj) - return err + for _, o := range filteredObjs { + accessor, err := meta.Accessor(o) + if err != nil { + return err + } + err = c.deleteObjectLocked(gvr, accessor) + if err != nil { + return err + } + } + return nil } -func (c *fakeClient) Status() client.StatusWriter { - return &fakeStatusWriter{client: c} +func (c *fakeClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + return c.update(obj, false, opts...) 
} -func getGVRFromObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionResource, error) { - gvk, err := apiutil.GVKForObject(obj, scheme) - if err != nil { - return schema.GroupVersionResource{}, err +func (c *fakeClient) update(obj client.Object, isStatus bool, opts ...client.UpdateOption) error { + if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(obj); err != nil { + return err } - gvr, _ := meta.UnsafeGuessKindToResource(gvk) - return gvr, nil -} -type fakeStatusWriter struct { - client *fakeClient -} + c.schemeLock.RLock() + defer c.schemeLock.RUnlock() -func (sw *fakeStatusWriter) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOptionFunc) error { - // TODO(droot): This results in full update of the obj (spec + status). Need - // a way to update status field only. - return sw.client.Update(ctx, obj, opts...) -} + updateOptions := &client.UpdateOptions{} + updateOptions.ApplyOptions(opts) -func (sw *fakeStatusWriter) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOptionFunc) error { - // TODO(droot): This results in full update of the obj (spec + status). Need - // a way to update status field only. - return sw.client.Patch(ctx, obj, patch, opts...) + for _, dryRunOpt := range updateOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + + c.trackerWriteLock.Lock() + defer c.trackerWriteLock.Unlock() + + // Retain managed fields + // We can ignore all errors here since update will fail if we encounter an error. 
+ obj.SetManagedFields(nil) + current, _ := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName()) + if currentMetaObj, ok := current.(metav1.Object); ok { + obj.SetManagedFields(currentMetaObj.GetManagedFields()) + } + + if err := c.tracker.update(gvr, obj, accessor.GetNamespace(), isStatus, false, *updateOptions.AsUpdateOptions()); err != nil { + return err + } + + if !c.returnManagedFields { + obj.SetManagedFields(nil) + } + + return ensureTypeMeta(obj, gvk) +} + +func (c *fakeClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + return c.patch(obj, patch, opts...) +} + +func (c *fakeClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error { + applyOpts := &client.ApplyOptions{} + applyOpts.ApplyOptions(opts) + + data, err := json.Marshal(obj) + if err != nil { + return fmt.Errorf("failed to marshal apply configuration: %w", err) + } + + u := &unstructured.Unstructured{} + if err := json.Unmarshal(data, u); err != nil { + return fmt.Errorf("failed to unmarshal apply configuration: %w", err) + } + + applyPatch := &fakeApplyPatch{} + + patchOpts := &client.PatchOptions{} + patchOpts.Raw = applyOpts.AsPatchOptions() + + if err := c.patch(u, applyPatch, patchOpts); err != nil { + return err + } + + acJSON, err := json.Marshal(u) + if err != nil { + return fmt.Errorf("failed to marshal patched object: %w", err) + } + + // We have to zero the object in case it contained a status and there is a + // status subresource. If its the private `unstructuredApplyConfiguration` + // we can not zero all of it, as that will cause the embedded Unstructured + // to be nil which then causes a NPD in the json.Unmarshal below. 
+ switch reflect.TypeOf(obj).String() { + case "*client.unstructuredApplyConfiguration": + zero(reflect.ValueOf(obj).Elem().FieldByName("Unstructured").Interface()) + default: + zero(obj) + } + if err := json.Unmarshal(acJSON, obj); err != nil { + return fmt.Errorf("failed to unmarshal patched object: %w", err) + } + + return nil +} + +type fakeApplyPatch struct{} + +func (p *fakeApplyPatch) Type() types.PatchType { + return types.ApplyPatchType +} + +func (p *fakeApplyPatch) Data(obj client.Object) ([]byte, error) { + return json.Marshal(obj) +} + +func (c *fakeClient) patch(obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(obj); err != nil { + return err + } + + patchOptions := &client.PatchOptions{} + patchOptions.ApplyOptions(opts) + + if errs := validation.ValidatePatchOptions(patchOptions.AsPatchOptions(), patch.Type()); len(errs) > 0 { + return apierrors.NewInvalid(schema.GroupKind{Group: "meta.k8s.io", Kind: "PatchOptions"}, "", errs) + } + + c.schemeLock.RLock() + defer c.schemeLock.RUnlock() + + for _, dryRunOpt := range patchOptions.DryRun { + if dryRunOpt == metav1.DryRunAll { + return nil + } + } + + gvr, err := getGVRFromObject(obj, c.scheme) + if err != nil { + return err + } + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + + var isApplyCreate bool + c.trackerWriteLock.Lock() + defer c.trackerWriteLock.Unlock() + oldObj, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName()) + if err != nil { + if !apierrors.IsNotFound(err) || patch.Type() != types.ApplyPatchType { + return err + } + oldObj = &unstructured.Unstructured{} + isApplyCreate = true + } + oldAccessor, err := meta.Accessor(oldObj) + if err != nil { + return err + } + + if patch.Type() == types.ApplyPatchType { + if isApplyCreate { + // Overwrite it unconditionally, this matches the 
apiserver behavior + // which allows to set it on create, but will then ignore it. + obj.SetResourceVersion("1") + } else { + // SSA deletionTimestamp updates are silently ignored + obj.SetDeletionTimestamp(oldAccessor.GetDeletionTimestamp()) + } + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + action := testing.NewPatchActionWithOptions( + gvr, + accessor.GetNamespace(), + accessor.GetName(), + patch.Type(), + data, + *patchOptions.AsPatchOptions(), + ) + + // Apply is implemented in the tracker and calling it has side-effects + // such as bumping RV and updating managedFields timestamps, hence we + // can not dry-run it. Luckily, the only validation we use it for + // doesn't apply to SSA - Creating objects with non-nil deletionTimestamp + // through SSA is possible and updating the deletionTimestamp is valid, + // but has no effect. + if patch.Type() != types.ApplyPatchType { + // Apply patch without updating object. + // To remain in accordance with the behavior of k8s api behavior, + // a patch must not allow for changes to the deletionTimestamp of an object. + // The reaction() function applies the patch to the object and calls Update(), + // whereas dryPatch() replicates this behavior but skips the call to Update(). + // This ensures that the patch may be rejected if a deletionTimestamp is modified, prior + // to updating the object. + o, err := dryPatch(action, c.tracker) + if err != nil { + return err + } + newObj, err := meta.Accessor(o) + if err != nil { + return err + } + + // Validate that deletionTimestamp has not been changed + if !deletionTimestampEqual(newObj, oldAccessor) { + return fmt.Errorf("rejected patch, metadata.deletionTimestamp immutable") + } + } + + reaction := testing.ObjectReaction(c.tracker) + handled, o, err := reaction(action) + if err != nil { + // The reaction calls tracker.Get after tracker.Apply to return the object, + // but we may have deleted it in tracker.Apply if there was no finalizer + // left. 
+ if apierrors.IsNotFound(err) && + patch.Type() == types.ApplyPatchType && + oldAccessor.GetDeletionTimestamp() != nil && + len(obj.GetFinalizers()) == 0 { + return nil + } + return err + } + if !handled { + panic("tracker could not handle patch method") + } + + ta, err := meta.TypeAccessor(o) + if err != nil { + return err + } + + ta.SetAPIVersion(gvk.GroupVersion().String()) + ta.SetKind(gvk.Kind) + + j, err := json.Marshal(o) + if err != nil { + return err + } + zero(obj) + if err := json.Unmarshal(j, obj); err != nil { + return err + } + + if !c.returnManagedFields { + obj.SetManagedFields(nil) + } + + return ensureTypeMeta(obj, gvk) +} + +// Applying a patch results in a deletionTimestamp that is truncated to the nearest second. +// Check that the diff between a new and old deletion timestamp is within a reasonable threshold +// to be considered unchanged. +func deletionTimestampEqual(newObj metav1.Object, obj metav1.Object) bool { + newTime := newObj.GetDeletionTimestamp() + oldTime := obj.GetDeletionTimestamp() + + if newTime == nil || oldTime == nil { + return newTime == oldTime + } + return newTime.Time.Sub(oldTime.Time).Abs() < time.Second +} + +// The behavior of applying the patch is pulled out into dryPatch(), +// which applies the patch and returns an object, but does not Update() the object. +// This function returns a patched runtime object that may then be validated before a call to Update() is executed. +// This results in some code duplication, but was found to be a cleaner alternative than unmarshalling and introspecting the patch data +// and easier than refactoring the k8s client-go method upstream. 
+// Duplicate of upstream: https://github.com/kubernetes/client-go/blob/783d0d33626e59d55d52bfd7696b775851f92107/testing/fixture.go#L146-L194 +func dryPatch(action testing.PatchActionImpl, tracker testing.ObjectTracker) (runtime.Object, error) { + ns := action.GetNamespace() + gvr := action.GetResource() + + obj, err := tracker.Get(gvr, ns, action.GetName()) + if err != nil { + if apierrors.IsNotFound(err) && action.GetPatchType() == types.ApplyPatchType { + return &unstructured.Unstructured{}, nil + } + return nil, err + } + + old, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + // reset the object in preparation to unmarshal, since unmarshal does not guarantee that fields + // in obj that are removed by patch are cleared + value := reflect.ValueOf(obj) + value.Elem().Set(reflect.New(value.Type().Elem()).Elem()) + + switch action.GetPatchType() { + case types.JSONPatchType: + patch, err := jsonpatch.DecodePatch(action.GetPatch()) + if err != nil { + return nil, err + } + modified, err := patch.Apply(old) + if err != nil { + return nil, err + } + + if err = json.Unmarshal(modified, obj); err != nil { + return nil, err + } + case types.MergePatchType: + modified, err := jsonpatch.MergePatch(old, action.GetPatch()) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(modified, obj); err != nil { + return nil, err + } + case types.StrategicMergePatchType: + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return nil, err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return nil, err + } + case types.ApplyCBORPatchType: + return nil, errors.New("apply CBOR patches are not supported in the fake client") + case types.ApplyPatchType: + return nil, errors.New("bug in controller-runtime: should not end up in dryPatch for SSA") + default: + return nil, fmt.Errorf("%s PatchType is not supported", action.GetPatchType()) + } + return obj, nil +} + +// 
copyStatusFrom copies the status from old into new +func copyStatusFrom(old, n runtime.Object) error { + oldMapStringAny, err := toMapStringAny(old) + if err != nil { + return fmt.Errorf("failed to convert old to *unstructured.Unstructured: %w", err) + } + newMapStringAny, err := toMapStringAny(n) + if err != nil { + return fmt.Errorf("failed to convert new to *unststructured.Unstructured: %w", err) + } + + newMapStringAny["status"] = oldMapStringAny["status"] + + if err := fromMapStringAny(newMapStringAny, n); err != nil { + return fmt.Errorf("failed to convert back from map[string]any: %w", err) + } + + return nil +} + +// copyFrom copies from old into new +func copyFrom(old, n runtime.Object) error { + oldMapStringAny, err := toMapStringAny(old) + if err != nil { + return fmt.Errorf("failed to convert old to *unstructured.Unstructured: %w", err) + } + if err := fromMapStringAny(oldMapStringAny, n); err != nil { + return fmt.Errorf("failed to convert back from map[string]any: %w", err) + } + + return nil +} + +func toMapStringAny(obj runtime.Object) (map[string]any, error) { + if unstructured, isUnstructured := obj.(*unstructured.Unstructured); isUnstructured { + return unstructured.Object, nil + } + + serialized, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + u := map[string]any{} + return u, json.Unmarshal(serialized, &u) +} + +func fromMapStringAny(u map[string]any, target runtime.Object) error { + if targetUnstructured, isUnstructured := target.(*unstructured.Unstructured); isUnstructured { + targetUnstructured.Object = u + return nil + } + + serialized, err := json.Marshal(u) + if err != nil { + return fmt.Errorf("failed to serialize: %w", err) + } + + zero(target) + if err := json.Unmarshal(serialized, &target); err != nil { + return fmt.Errorf("failed to deserialize: %w", err) + } + + return nil +} + +func (c *fakeClient) Status() client.SubResourceWriter { + return c.SubResource("status") +} + +func (c *fakeClient) 
SubResource(subResource string) client.SubResourceClient { + return &fakeSubResourceClient{client: c, subResource: subResource} +} + +func (c *fakeClient) deleteObjectLocked(gvr schema.GroupVersionResource, accessor metav1.Object) error { + old, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName()) + if err == nil { + oldAccessor, err := meta.Accessor(old) + if err == nil { + if len(oldAccessor.GetFinalizers()) > 0 { + now := metav1.Now() + oldAccessor.SetDeletionTimestamp(&now) + // Call update directly with mutability parameter set to true to allow + // changes to deletionTimestamp + return c.tracker.update(gvr, old, accessor.GetNamespace(), false, true, metav1.UpdateOptions{}) + } + } + } + + // TODO: implement propagation + return c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) +} + +func getGVRFromObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionResource, error) { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return schema.GroupVersionResource{}, err + } + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + return gvr, nil +} + +type fakeSubResourceClient struct { + client *fakeClient + subResource string +} + +func (sw *fakeSubResourceClient) Get(ctx context.Context, obj, subResource client.Object, opts ...client.SubResourceGetOption) error { + switch sw.subResource { + case subResourceScale: + // Actual client looks up resource, then extracts the scale sub-resource: + // https://github.com/kubernetes/kubernetes/blob/fb6bbc9781d11a87688c398778525c4e1dcb0f08/pkg/registry/apps/deployment/storage/storage.go#L307 + if err := sw.client.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil { + return err + } + scale, isScale := subResource.(*autoscalingv1.Scale) + if !isScale { + return apierrors.NewBadRequest(fmt.Sprintf("expected Scale, got %T", subResource)) + } + scaleOut, err := extractScale(obj) + if err != nil { + return err + } + *scale = *scaleOut + return nil + 
default: + return fmt.Errorf("fakeSubResourceClient does not support get for %s", sw.subResource) + } +} + +func (sw *fakeSubResourceClient) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { + switch sw.subResource { + case "eviction": + _, isEviction := subResource.(*policyv1beta1.Eviction) + if !isEviction { + _, isEviction = subResource.(*policyv1.Eviction) + } + if !isEviction { + return apierrors.NewBadRequest(fmt.Sprintf("got invalid type %T, expected Eviction", subResource)) + } + if _, isPod := obj.(*corev1.Pod); !isPod { + return apierrors.NewNotFound(schema.GroupResource{}, "") + } + + return sw.client.Delete(ctx, obj) + case "token": + tokenRequest, isTokenRequest := subResource.(*authenticationv1.TokenRequest) + if !isTokenRequest { + return apierrors.NewBadRequest(fmt.Sprintf("got invalid type %T, expected TokenRequest", subResource)) + } + if _, isServiceAccount := obj.(*corev1.ServiceAccount); !isServiceAccount { + return apierrors.NewNotFound(schema.GroupResource{}, "") + } + + tokenRequest.Status.Token = "fake-token" + tokenRequest.Status.ExpirationTimestamp = metav1.Date(6041, 1, 1, 0, 0, 0, 0, time.UTC) + + return sw.client.Get(ctx, client.ObjectKeyFromObject(obj), obj) + default: + return fmt.Errorf("fakeSubResourceWriter does not support create for %s", sw.subResource) + } +} + +func (sw *fakeSubResourceClient) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + updateOptions := client.SubResourceUpdateOptions{} + updateOptions.ApplyOptions(opts) + + switch sw.subResource { + case subResourceScale: + if err := sw.client.Get(ctx, client.ObjectKeyFromObject(obj), obj.DeepCopyObject().(client.Object)); err != nil { + return err + } + if updateOptions.SubResourceBody == nil { + return apierrors.NewBadRequest("missing SubResourceBody") + } + + scale, isScale := updateOptions.SubResourceBody.(*autoscalingv1.Scale) + if !isScale { + 
return apierrors.NewBadRequest(fmt.Sprintf("expected Scale, got %T", updateOptions.SubResourceBody)) + } + if err := applyScale(obj, scale); err != nil { + return err + } + return sw.client.update(obj, false, &updateOptions.UpdateOptions) + default: + body := obj + if updateOptions.SubResourceBody != nil { + body = updateOptions.SubResourceBody + } + return sw.client.update(body, true, &updateOptions.UpdateOptions) + } +} + +func (sw *fakeSubResourceClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + patchOptions := client.SubResourcePatchOptions{} + patchOptions.ApplyOptions(opts) + + body := obj + if patchOptions.SubResourceBody != nil { + body = patchOptions.SubResourceBody + } + + // this is necessary to identify that last call was made for status patch, through stack trace. + if sw.subResource == "status" { + return sw.statusPatch(body, patch, patchOptions) + } + + return sw.client.patch(body, patch, &patchOptions.PatchOptions) +} + +func (sw *fakeSubResourceClient) statusPatch(body client.Object, patch client.Patch, patchOptions client.SubResourcePatchOptions) error { + return sw.client.patch(body, patch, &patchOptions.PatchOptions) +} + +func (sw *fakeSubResourceClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...client.SubResourceApplyOption) error { + if sw.subResource != "status" { + return errors.New("fakeSubResourceClient currently only supports Apply for status subresource") + } + + applyOpts := &client.SubResourceApplyOptions{} + applyOpts.ApplyOpts(opts) + + data, err := json.Marshal(obj) + if err != nil { + return fmt.Errorf("failed to marshal apply configuration: %w", err) + } + + u := &unstructured.Unstructured{} + if err := json.Unmarshal(data, u); err != nil { + return fmt.Errorf("failed to unmarshal apply configuration: %w", err) + } + + patchOpts := &client.SubResourcePatchOptions{} + patchOpts.Raw = applyOpts.AsPatchOptions() + + if 
applyOpts.SubResourceBody != nil { + subResourceBodySerialized, err := json.Marshal(applyOpts.SubResourceBody) + if err != nil { + return fmt.Errorf("failed to serialize subresource body: %w", err) + } + subResourceBody := &unstructured.Unstructured{} + if err := json.Unmarshal(subResourceBodySerialized, subResourceBody); err != nil { + return fmt.Errorf("failed to unmarshal subresource body: %w", err) + } + patchOpts.SubResourceBody = subResourceBody + } + + return sw.Patch(ctx, u, &fakeApplyPatch{}, patchOpts) +} + +func allowsUnconditionalUpdate(gvk schema.GroupVersionKind) bool { + switch gvk.Group { + case "apps": + switch gvk.Kind { + case "ControllerRevision", "DaemonSet", "Deployment", "ReplicaSet", "StatefulSet": + return true + } + case "autoscaling": + switch gvk.Kind { + case "HorizontalPodAutoscaler": + return true + } + case "batch": + switch gvk.Kind { + case "CronJob", "Job": + return true + } + case "certificates": + switch gvk.Kind { + case "Certificates": + return true + } + case "flowcontrol": + switch gvk.Kind { + case "FlowSchema", "PriorityLevelConfiguration": + return true + } + case "networking": + switch gvk.Kind { + case "Ingress", "IngressClass", "NetworkPolicy": + return true + } + case "policy": + switch gvk.Kind { + case "PodSecurityPolicy": + return true + } + case "rbac.authorization.k8s.io": + switch gvk.Kind { + case "ClusterRole", "ClusterRoleBinding", "Role", "RoleBinding": + return true + } + case "scheduling": + switch gvk.Kind { + case "PriorityClass": + return true + } + case "settings": + switch gvk.Kind { + case "PodPreset": + return true + } + case "storage": + switch gvk.Kind { + case "StorageClass": + return true + } + case "": + switch gvk.Kind { + case "ConfigMap", "Endpoint", "Event", "LimitRange", "Namespace", "Node", + "PersistentVolume", "PersistentVolumeClaim", "Pod", "PodTemplate", + "ReplicationController", "ResourceQuota", "Secret", "Service", + "ServiceAccount", "EndpointSlice": + return true + } + } + + 
return false +} + +func allowsCreateOnUpdate(gvk schema.GroupVersionKind) bool { + switch gvk.Group { + case "coordination": + switch gvk.Kind { + case "Lease": + return true + } + case "node": + switch gvk.Kind { + case "RuntimeClass": + return true + } + case "rbac": + switch gvk.Kind { + case "ClusterRole", "ClusterRoleBinding", "Role", "RoleBinding": + return true + } + case "": + switch gvk.Kind { + case "Endpoint", "Event", "LimitRange", "Service": + return true + } + } + + return false +} + +func inTreeResourcesWithStatus() []schema.GroupVersionKind { + return []schema.GroupVersionKind{ + {Version: "v1", Kind: "Namespace"}, + {Version: "v1", Kind: "Node"}, + {Version: "v1", Kind: "PersistentVolumeClaim"}, + {Version: "v1", Kind: "PersistentVolume"}, + {Version: "v1", Kind: "Pod"}, + {Version: "v1", Kind: "ReplicationController"}, + {Version: "v1", Kind: "Service"}, + + {Group: "apps", Version: "v1", Kind: "Deployment"}, + {Group: "apps", Version: "v1", Kind: "DaemonSet"}, + {Group: "apps", Version: "v1", Kind: "ReplicaSet"}, + {Group: "apps", Version: "v1", Kind: "StatefulSet"}, + + {Group: "autoscaling", Version: "v1", Kind: "HorizontalPodAutoscaler"}, + + {Group: "batch", Version: "v1", Kind: "CronJob"}, + {Group: "batch", Version: "v1", Kind: "Job"}, + + {Group: "certificates.k8s.io", Version: "v1", Kind: "CertificateSigningRequest"}, + + {Group: "networking.k8s.io", Version: "v1", Kind: "Ingress"}, + {Group: "networking.k8s.io", Version: "v1", Kind: "NetworkPolicy"}, + + {Group: "policy", Version: "v1", Kind: "PodDisruptionBudget"}, + + {Group: "storage.k8s.io", Version: "v1", Kind: "VolumeAttachment"}, + + {Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"}, + + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "FlowSchema"}, + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta2", Kind: "PriorityLevelConfiguration"}, + {Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "FlowSchema"}, + {Group: 
"flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfiguration"}, + } +} + +// zero zeros the value of a pointer. +func zero(x interface{}) { + if x == nil { + return + } + res := reflect.ValueOf(x).Elem() + res.Set(reflect.Zero(res.Type())) +} + +// getSingleOrZeroOptions returns the single options value in the slice, its +// zero value if the slice is empty, or an error if the slice contains more than +// one option value. +func getSingleOrZeroOptions[T any](opts []T) (opt T, err error) { + switch len(opts) { + case 0: + case 1: + opt = opts[0] + default: + err = fmt.Errorf("expected single or no options value, got %d values", len(opts)) + } + return +} + +func extractScale(obj client.Object) (*autoscalingv1.Scale, error) { + switch obj := obj.(type) { + case *appsv1.Deployment: + var replicas int32 = 1 + if obj.Spec.Replicas != nil { + replicas = *obj.Spec.Replicas + } + var selector string + if obj.Spec.Selector != nil { + selector = obj.Spec.Selector.String() + } + return &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: obj.Namespace, + Name: obj.Name, + UID: obj.UID, + ResourceVersion: obj.ResourceVersion, + CreationTimestamp: obj.CreationTimestamp, + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: replicas, + }, + Status: autoscalingv1.ScaleStatus{ + Replicas: obj.Status.Replicas, + Selector: selector, + }, + }, nil + case *appsv1.ReplicaSet: + var replicas int32 = 1 + if obj.Spec.Replicas != nil { + replicas = *obj.Spec.Replicas + } + var selector string + if obj.Spec.Selector != nil { + selector = obj.Spec.Selector.String() + } + return &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: obj.Namespace, + Name: obj.Name, + UID: obj.UID, + ResourceVersion: obj.ResourceVersion, + CreationTimestamp: obj.CreationTimestamp, + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: replicas, + }, + Status: autoscalingv1.ScaleStatus{ + Replicas: obj.Status.Replicas, + Selector: selector, + }, + }, nil + case 
*corev1.ReplicationController: + var replicas int32 = 1 + if obj.Spec.Replicas != nil { + replicas = *obj.Spec.Replicas + } + return &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: obj.Namespace, + Name: obj.Name, + UID: obj.UID, + ResourceVersion: obj.ResourceVersion, + CreationTimestamp: obj.CreationTimestamp, + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: replicas, + }, + Status: autoscalingv1.ScaleStatus{ + Replicas: obj.Status.Replicas, + Selector: labels.Set(obj.Spec.Selector).String(), + }, + }, nil + case *appsv1.StatefulSet: + var replicas int32 = 1 + if obj.Spec.Replicas != nil { + replicas = *obj.Spec.Replicas + } + var selector string + if obj.Spec.Selector != nil { + selector = obj.Spec.Selector.String() + } + return &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: obj.Namespace, + Name: obj.Name, + UID: obj.UID, + ResourceVersion: obj.ResourceVersion, + CreationTimestamp: obj.CreationTimestamp, + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: replicas, + }, + Status: autoscalingv1.ScaleStatus{ + Replicas: obj.Status.Replicas, + Selector: selector, + }, + }, nil + default: + // TODO: CRDs https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#scale-subresource + return nil, fmt.Errorf("unimplemented scale subresource for resource %T", obj) + } +} + +func applyScale(obj client.Object, scale *autoscalingv1.Scale) error { + switch obj := obj.(type) { + case *appsv1.Deployment: + obj.Spec.Replicas = ptr.To(scale.Spec.Replicas) + case *appsv1.ReplicaSet: + obj.Spec.Replicas = ptr.To(scale.Spec.Replicas) + case *corev1.ReplicationController: + obj.Spec.Replicas = ptr.To(scale.Spec.Replicas) + case *appsv1.StatefulSet: + obj.Spec.Replicas = ptr.To(scale.Spec.Replicas) + default: + // TODO: CRDs https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#scale-subresource + return fmt.Errorf("unimplemented scale subresource for resource 
%T", obj) + } + return nil +} + +// AddIndex adds an index to a fake client. It will panic if used with a client that is not a fake client. +// It will error if there is already an index for given object with the same name as field. +// +// It can be used to test code that adds indexes to the cache at runtime. +func AddIndex(c client.Client, obj runtime.Object, field string, extractValue client.IndexerFunc) error { + fakeClient, isFakeClient := c.(*fakeClient) + if !isFakeClient { + panic("AddIndex can only be used with a fake client") + } + fakeClient.indexesLock.Lock() + defer fakeClient.indexesLock.Unlock() + + if fakeClient.indexes == nil { + fakeClient.indexes = make(map[schema.GroupVersionKind]map[string]client.IndexerFunc, 1) + } + + gvk, err := apiutil.GVKForObject(obj, fakeClient.scheme) + if err != nil { + return fmt.Errorf("failed to get gvk for %T: %w", obj, err) + } + + if fakeClient.indexes[gvk] == nil { + fakeClient.indexes[gvk] = make(map[string]client.IndexerFunc, 1) + } + + if fakeClient.indexes[gvk][field] != nil { + return fmt.Errorf("index %s already exists", field) + } + + fakeClient.indexes[gvk][field] = extractValue + + return nil +} + +func (c *fakeClient) addToSchemeIfUnknownAndUnstructuredOrPartial(obj runtime.Object) error { + c.schemeLock.Lock() + defer c.schemeLock.Unlock() + + _, isUnstructured := obj.(*unstructured.Unstructured) + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + _, isPartial := obj.(*metav1.PartialObjectMetadata) + _, isPartialList := obj.(*metav1.PartialObjectMetadataList) + if !isUnstructured && !isUnstructuredList && !isPartial && !isPartialList { + return nil + } + + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return err + } + + if !c.scheme.Recognizes(gvk) { + c.scheme.AddKnownTypeWithName(gvk, obj) + } + + return nil +} + +func ensureTypeMeta(obj runtime.Object, gvk schema.GroupVersionKind) error { + ta, err := meta.TypeAccessor(obj) + if err != nil { + return err + } + _, 
isUnstructured := obj.(runtime.Unstructured) + _, isPartialObject := obj.(*metav1.PartialObjectMetadata) + _, isPartialObjectList := obj.(*metav1.PartialObjectMetadataList) + if !isUnstructured && !isPartialObject && !isPartialObjectList { + ta.SetKind("") + ta.SetAPIVersion("") + return nil + } + + ta.SetKind(gvk.Kind) + ta.SetAPIVersion(gvk.GroupVersion().String()) + + return nil } diff --git a/pkg/client/fake/client_suite_test.go b/pkg/client/fake/client_suite_test.go index 7342148c47..66590f0b58 100644 --- a/pkg/client/fake/client_suite_test.go +++ b/pkg/client/fake/client_suite_test.go @@ -19,9 +19,8 @@ package fake import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -29,10 +28,9 @@ import ( func TestSource(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Controller Integration Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Fake client Suite") } -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - close(done) -}, 60) +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}) diff --git a/pkg/client/fake/client_test.go b/pkg/client/fake/client_test.go index ad173fae01..36722b4ddc 100644 --- a/pkg/client/fake/client_test.go +++ b/pkg/client/fake/client_test.go @@ -17,31 +17,70 @@ limitations under the License. package fake import ( + "context" "encoding/json" + "fmt" + "strconv" + "sync" + "time" - . "github.com/onsi/ginkgo" + "github.com/google/go-cmp/cmp" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" + authenticationv1 "k8s.io/api/authentication/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + coordinationv1 "k8s.io/api/coordination/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + policyv1 "k8s.io/api/policy/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + clientgoapplyconfigurations "k8s.io/client-go/applyconfigurations" + corev1applyconfigurations "k8s.io/client-go/applyconfigurations/core/v1" + "k8s.io/client-go/kubernetes/fake" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/testing" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +const ( + machineIDFromStatusUpdate = "machine-id-from-status-update" + cidrFromStatusUpdate = "cidr-from-status-update" ) var _ = Describe("Fake client", func() { var dep *appsv1.Deployment var dep2 *appsv1.Deployment var cm *corev1.ConfigMap - var cl client.Client + var cl client.WithWatch BeforeEach(func() { + replicas := int32(1) dep = &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "ns1", + Name: "test-deployment", + Namespace: "ns1", + ResourceVersion: trackerAddResourceVersion, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RecreateDeploymentStrategyType, + }, }, } dep2 = &appsv1.Deployment{ @@ -51,12 +90,17 @@ 
var _ = Describe("Fake client", func() { Labels: map[string]string{ "test-label": "label-value", }, + ResourceVersion: trackerAddResourceVersion, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, }, } cm = &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "ns2", + Name: "test-cm", + Namespace: "ns2", + ResourceVersion: trackerAddResourceVersion, }, Data: map[string]string{ "test-key": "test-value", @@ -64,41 +108,292 @@ var _ = Describe("Fake client", func() { } }) - AssertClientBehavior := func() { - It("should be able to Get", func() { + AssertClientWithoutIndexBehavior := func() { + It("should be able to Get", func(ctx SpecContext) { By("Getting a deployment") namespacedName := types.NamespacedName{ Name: "test-deployment", Namespace: "ns1", } obj := &appsv1.Deployment{} - err := cl.Get(nil, namespacedName, obj) - Expect(err).To(BeNil()) + err := cl.Get(ctx, namespacedName, obj) + Expect(err).ToNot(HaveOccurred()) Expect(obj).To(Equal(dep)) }) - It("should be able to List", func() { + It("should be able to Get using unstructured", func(ctx SpecContext) { + By("Getting a deployment") + namespacedName := types.NamespacedName{ + Name: "test-deployment", + Namespace: "ns1", + } + obj := &unstructured.Unstructured{} + obj.SetAPIVersion("apps/v1") + obj.SetKind("Deployment") + err := cl.Get(ctx, namespacedName, obj) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should be able to List", func(ctx SpecContext) { By("Listing all deployments in a namespace") list := &appsv1.DeploymentList{} - err := cl.List(nil, list, client.InNamespace("ns1")) - Expect(err).To(BeNil()) + err := cl.List(ctx, list, client.InNamespace("ns1")) + Expect(err).ToNot(HaveOccurred()) Expect(list.Items).To(HaveLen(2)) Expect(list.Items).To(ConsistOf(*dep, *dep2)) }) - It("should support filtering by labels", func() { - By("Listing deployments with a particular label") + It("should be able to List using unstructured list", func(ctx SpecContext) { + 
By("Listing all deployments in a namespace") + list := &unstructured.UnstructuredList{} + list.SetAPIVersion("apps/v1") + list.SetKind("DeploymentList") + err := cl.List(ctx, list, client.InNamespace("ns1")) + Expect(err).ToNot(HaveOccurred()) + Expect(list.GroupVersionKind().GroupVersion().String()).To(Equal("apps/v1")) + Expect(list.GetKind()).To(Equal("DeploymentList")) + Expect(list.Items).To(HaveLen(2)) + }) + + It("should be able to List using unstructured list when setting a non-list kind", func(ctx SpecContext) { + By("Listing all deployments in a namespace") + list := &unstructured.UnstructuredList{} + list.SetAPIVersion("apps/v1") + list.SetKind("Deployment") + err := cl.List(ctx, list, client.InNamespace("ns1")) + Expect(err).ToNot(HaveOccurred()) + Expect(list.GroupVersionKind().GroupVersion().String()).To(Equal("apps/v1")) + Expect(list.GetKind()).To(Equal("Deployment")) + Expect(list.Items).To(HaveLen(2)) + }) + + It("should be able to retrieve registered objects that got manipulated as unstructured", func(ctx SpecContext) { + list := func() { + By("Listing all endpoints in a namespace") + list := &unstructured.UnstructuredList{} + list.SetAPIVersion("v1") + list.SetKind("EndpointsList") + err := cl.List(ctx, list, client.InNamespace("ns1")) + Expect(err).ToNot(HaveOccurred()) + Expect(list.GroupVersionKind().GroupVersion().String()).To(Equal("v1")) + Expect(list.GetKind()).To(Equal("EndpointsList")) + Expect(list.Items).To(HaveLen(1)) + } + + unstructuredEndpoint := func() *unstructured.Unstructured { + item := &unstructured.Unstructured{} + item.SetAPIVersion("v1") + item.SetKind("Endpoints") + item.SetName("test-endpoint") + item.SetNamespace("ns1") + return item + } + + By("Adding the object during client initialization") + cl = NewClientBuilder().WithRuntimeObjects(unstructuredEndpoint()).Build() + list() + Expect(cl.Delete(ctx, unstructuredEndpoint())).To(Succeed()) + + By("Creating an object") + item := unstructuredEndpoint() + err := 
cl.Create(ctx, item) + Expect(err).ToNot(HaveOccurred()) + list() + + By("Updating the object") + item.SetAnnotations(map[string]string{"foo": "bar"}) + err = cl.Update(ctx, item) + Expect(err).ToNot(HaveOccurred()) + list() + + By("Patching the object") + old := item.DeepCopy() + item.SetAnnotations(map[string]string{"bar": "baz"}) + err = cl.Patch(ctx, item, client.MergeFrom(old)) + Expect(err).ToNot(HaveOccurred()) + list() + }) + + It("should be able to Create an unregistered type using unstructured", func(ctx SpecContext) { + item := &unstructured.Unstructured{} + item.SetAPIVersion("custom/v1") + item.SetKind("Image") + item.SetName("my-item") + err := cl.Create(ctx, item) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should be able to Get an unregisted type using unstructured", func(ctx SpecContext) { + By("Creating an object of an unregistered type") + item := &unstructured.Unstructured{} + item.SetAPIVersion("custom/v2") + item.SetKind("Image") + item.SetName("my-item") + err := cl.Create(ctx, item) + Expect(err).ToNot(HaveOccurred()) + + By("Getting and the object") + item = &unstructured.Unstructured{} + item.SetAPIVersion("custom/v2") + item.SetKind("Image") + item.SetName("my-item") + err = cl.Get(ctx, client.ObjectKeyFromObject(item), item) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should be able to List an unregistered type using unstructured with ListKind", func(ctx SpecContext) { + list := &unstructured.UnstructuredList{} + list.SetAPIVersion("custom/v3") + list.SetKind("ImageList") + err := cl.List(ctx, list) + Expect(list.GroupVersionKind().GroupVersion().String()).To(Equal("custom/v3")) + Expect(list.GetKind()).To(Equal("ImageList")) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should be able to List an unregistered type using unstructured with Kind", func(ctx SpecContext) { + list := &unstructured.UnstructuredList{} + list.SetAPIVersion("custom/v4") + list.SetKind("Image") + err := cl.List(ctx, list) + 
Expect(err).ToNot(HaveOccurred()) + Expect(list.GroupVersionKind().GroupVersion().String()).To(Equal("custom/v4")) + Expect(list.GetKind()).To(Equal("Image")) + }) + + It("should be able to Update an unregistered type using unstructured", func(ctx SpecContext) { + By("Creating an object of an unregistered type") + item := &unstructured.Unstructured{} + item.SetAPIVersion("custom/v5") + item.SetKind("Image") + item.SetName("my-item") + err := cl.Create(ctx, item) + Expect(err).ToNot(HaveOccurred()) + + By("Updating the object") + err = unstructured.SetNestedField(item.Object, int64(2), "spec", "replicas") + Expect(err).ToNot(HaveOccurred()) + err = cl.Update(ctx, item) + Expect(err).ToNot(HaveOccurred()) + + By("Getting the object") + item = &unstructured.Unstructured{} + item.SetAPIVersion("custom/v5") + item.SetKind("Image") + item.SetName("my-item") + err = cl.Get(ctx, client.ObjectKeyFromObject(item), item) + Expect(err).ToNot(HaveOccurred()) + + By("Inspecting the object") + value, found, err := unstructured.NestedInt64(item.Object, "spec", "replicas") + Expect(err).ToNot(HaveOccurred()) + Expect(found).To(BeTrue()) + Expect(value).To(Equal(int64(2))) + }) + + It("should be able to Patch an unregistered type using unstructured", func(ctx SpecContext) { + By("Creating an object of an unregistered type") + item := &unstructured.Unstructured{} + item.SetAPIVersion("custom/v6") + item.SetKind("Image") + item.SetName("my-item") + err := cl.Create(ctx, item) + Expect(err).ToNot(HaveOccurred()) + + By("Updating the object") + original := item.DeepCopy() + err = unstructured.SetNestedField(item.Object, int64(2), "spec", "replicas") + Expect(err).ToNot(HaveOccurred()) + err = cl.Patch(ctx, item, client.MergeFrom(original)) + Expect(err).ToNot(HaveOccurred()) + + By("Getting the object") + item = &unstructured.Unstructured{} + item.SetAPIVersion("custom/v6") + item.SetKind("Image") + item.SetName("my-item") + err = cl.Get(ctx, client.ObjectKeyFromObject(item), item) + 
Expect(err).ToNot(HaveOccurred()) + + By("Inspecting the object") + value, found, err := unstructured.NestedInt64(item.Object, "spec", "replicas") + Expect(err).ToNot(HaveOccurred()) + Expect(found).To(BeTrue()) + Expect(value).To(Equal(int64(2))) + }) + + It("should be able to Delete an unregistered type using unstructured", func(ctx SpecContext) { + By("Creating an object of an unregistered type") + item := &unstructured.Unstructured{} + item.SetAPIVersion("custom/v7") + item.SetKind("Image") + item.SetName("my-item") + err := cl.Create(ctx, item) + Expect(err).ToNot(HaveOccurred()) + + By("Deleting the object") + err = cl.Delete(ctx, item) + Expect(err).ToNot(HaveOccurred()) + + By("Getting the object") + item = &unstructured.Unstructured{} + item.SetAPIVersion("custom/v7") + item.SetKind("Image") + item.SetName("my-item") + err = cl.Get(ctx, client.ObjectKeyFromObject(item), item) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should be able to retrieve objects by PartialObjectMetadata", func(ctx SpecContext) { + By("Creating a Resource") + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + }, + } + err := cl.Create(ctx, secret) + Expect(err).ToNot(HaveOccurred()) + + By("Fetching the resource using a PartialObjectMeta") + partialObjMeta := &metav1.PartialObjectMetadata{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + }, + } + partialObjMeta.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) + + err = cl.Get(ctx, client.ObjectKeyFromObject(partialObjMeta), partialObjMeta) + Expect(err).ToNot(HaveOccurred()) + + Expect(partialObjMeta.Kind).To(Equal("Secret")) + Expect(partialObjMeta.APIVersion).To(Equal("v1")) + }) + + It("should support filtering by labels and their values", func(ctx SpecContext) { + By("Listing deployments with a particular label and value") list := &appsv1.DeploymentList{} - err := cl.List(nil, list, client.InNamespace("ns1"), + err := 
cl.List(ctx, list, client.InNamespace("ns1"), client.MatchingLabels(map[string]string{ "test-label": "label-value", })) - Expect(err).To(BeNil()) + Expect(err).ToNot(HaveOccurred()) + Expect(list.Items).To(HaveLen(1)) + Expect(list.Items).To(ConsistOf(*dep2)) + }) + + It("should support filtering by label existence", func(ctx SpecContext) { + By("Listing deployments with a particular label") + list := &appsv1.DeploymentList{} + err := cl.List(ctx, list, client.InNamespace("ns1"), + client.HasLabels{"test-label"}) + Expect(err).ToNot(HaveOccurred()) Expect(list.Items).To(HaveLen(1)) Expect(list.Items).To(ConsistOf(*dep2)) }) - It("should be able to Create", func() { + It("should be able to Create", func(ctx SpecContext) { By("Creating a new configmap") newcm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -106,8 +401,8 @@ var _ = Describe("Fake client", func() { Namespace: "ns2", }, } - err := cl.Create(nil, newcm) - Expect(err).To(BeNil()) + err := cl.Create(ctx, newcm) + Expect(err).ToNot(HaveOccurred()) By("Getting the new configmap") namespacedName := types.NamespacedName{ @@ -115,24 +410,97 @@ var _ = Describe("Fake client", func() { Namespace: "ns2", } obj := &corev1.ConfigMap{} - err = cl.Get(nil, namespacedName, obj) - Expect(err).To(BeNil()) + err = cl.Get(ctx, namespacedName, obj) + Expect(err).ToNot(HaveOccurred()) Expect(obj).To(Equal(newcm)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal("1")) }) - It("should be able to Update", func() { - By("Updating a new configmap") + It("should error on create with set resourceVersion", func(ctx SpecContext) { + By("Creating a new configmap") + newcm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-test-cm", + Namespace: "ns2", + ResourceVersion: "1", + }, + } + err := cl.Create(ctx, newcm) + Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + It("should not change the submitted object if Create failed", func(ctx SpecContext) { + By("Trying to create an existing configmap") + 
submitted := cm.DeepCopy() + submitted.ResourceVersion = "" + submittedReference := submitted.DeepCopy() + err := cl.Create(ctx, submitted) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsAlreadyExists(err)).To(BeTrue()) + Expect(submitted).To(BeComparableTo(submittedReference)) + }) + + It("should error on Create with empty Name", func(ctx SpecContext) { + By("Creating a new configmap") + newcm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns2", + }, + } + err := cl.Create(ctx, newcm) + Expect(err.Error()).To(Equal("ConfigMap \"\" is invalid: metadata.name: Required value: name is required")) + }) + + It("should error on Update with empty Name", func(ctx SpecContext) { + By("Creating a new configmap") newcm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", Namespace: "ns2", }, + } + err := cl.Update(ctx, newcm) + Expect(err.Error()).To(Equal("ConfigMap \"\" is invalid: metadata.name: Required value: name is required")) + }) + + It("should be able to Create with GenerateName", func(ctx SpecContext) { + By("Creating a new configmap") + newcm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "new-test-cm", + Namespace: "ns2", + Labels: map[string]string{ + "test-label": "label-value", + }, + }, + } + err := cl.Create(ctx, newcm) + Expect(err).ToNot(HaveOccurred()) + + By("Listing configmaps with a particular label") + list := &corev1.ConfigMapList{} + err = cl.List(ctx, list, client.InNamespace("ns2"), + client.MatchingLabels(map[string]string{ + "test-label": "label-value", + })) + Expect(err).ToNot(HaveOccurred()) + Expect(list.Items).To(HaveLen(1)) + Expect(list.Items[0].Name).NotTo(BeEmpty()) + }) + + It("should be able to Update", func(ctx SpecContext) { + By("Updating a new configmap") + newcm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "ns2", + ResourceVersion: "", + }, Data: map[string]string{ "test-key": "new-value", }, } - err := cl.Update(nil, 
newcm) - Expect(err).To(BeNil()) + err := cl.Update(ctx, newcm) + Expect(err).ToNot(HaveOccurred()) By("Getting the new configmap") namespacedName := types.NamespacedName{ @@ -140,115 +508,2813 @@ var _ = Describe("Fake client", func() { Namespace: "ns2", } obj := &corev1.ConfigMap{} - err = cl.Get(nil, namespacedName, obj) - Expect(err).To(BeNil()) + err = cl.Get(ctx, namespacedName, obj) + Expect(err).ToNot(HaveOccurred()) Expect(obj).To(Equal(newcm)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal("1000")) }) - It("should be able to Delete", func() { - By("Deleting a deployment") - err := cl.Delete(nil, dep) - Expect(err).To(BeNil()) + It("should allow updates with non-set ResourceVersion for a resource that allows unconditional updates", func(ctx SpecContext) { + By("Updating a new configmap") + newcm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "ns2", + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Update(ctx, newcm) + Expect(err).ToNot(HaveOccurred()) - By("Listing all deployments in the namespace") - list := &appsv1.DeploymentList{} - err = cl.List(nil, list, client.InNamespace("ns1")) - Expect(err).To(BeNil()) - Expect(list.Items).To(HaveLen(1)) - Expect(list.Items).To(ConsistOf(*dep2)) + By("Getting the configmap") + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "ns2", + } + obj := &corev1.ConfigMap{} + err = cl.Get(ctx, namespacedName, obj) + Expect(err).ToNot(HaveOccurred()) + Expect(obj).To(Equal(newcm)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal("1000")) }) - Context("with the DryRun option", func() { - It("should not create a new object", func() { - By("Creating a new configmap with DryRun") - newcm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "new-test-cm", - Namespace: "ns2", + It("should allow patch when the patch sets RV to 'null'", func(ctx SpecContext) { + cl := NewClientBuilder().Build() + original := &corev1.ConfigMap{ 
+ ObjectMeta: metav1.ObjectMeta{ + Name: "obj", + Namespace: "ns2", + }} + + err := cl.Create(ctx, original) + Expect(err).ToNot(HaveOccurred()) + + newObj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: original.Name, + Namespace: original.Namespace, + Annotations: map[string]string{ + "foo": "bar", }, - } - err := cl.Create(nil, newcm, client.CreateDryRunAll) - Expect(err).To(BeNil()) + }} - By("Getting the new configmap") - namespacedName := types.NamespacedName{ - Name: "new-test-cm", + Expect(cl.Patch(ctx, newObj, client.MergeFrom(original))).To(Succeed()) + + patched := &corev1.ConfigMap{} + Expect(cl.Get(ctx, client.ObjectKeyFromObject(original), patched)).To(Succeed()) + Expect(patched.Annotations).To(Equal(map[string]string{"foo": "bar"})) + }) + + It("should reject updates with non-set ResourceVersion for a resource that doesn't allow unconditional updates", func(ctx SpecContext) { + By("Creating a new binding") + binding := &corev1.Binding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", Namespace: "ns2", - } - obj := &corev1.ConfigMap{} - err = cl.Get(nil, namespacedName, obj) - Expect(err).To(HaveOccurred()) - Expect(errors.IsNotFound(err)).To(BeTrue()) - Expect(obj).NotTo(Equal(newcm)) - }) + }, + Target: corev1.ObjectReference{ + Kind: "ConfigMap", + APIVersion: "v1", + Namespace: cm.Namespace, + Name: cm.Name, + }, + } + Expect(cl.Create(ctx, binding)).To(Succeed()) - It("should not Update the object", func() { - By("Updating a new configmap with DryRun") - newcm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "ns2", - }, - Data: map[string]string{ - "test-key": "new-value", - }, - } - err := cl.Update(nil, newcm, client.UpdateDryRunAll) - Expect(err).To(BeNil()) + By("Updating the binding with a new resource lacking resource version") + newBinding := &corev1.Binding{ + ObjectMeta: metav1.ObjectMeta{ + Name: binding.Name, + Namespace: binding.Namespace, + }, + Target: 
corev1.ObjectReference{ + Namespace: binding.Namespace, + Name: "blue", + }, + } + Expect(cl.Update(ctx, newBinding)).NotTo(Succeed()) + }) - By("Getting the new configmap") - namespacedName := types.NamespacedName{ - Name: "test-cm", + It("should allow create on update for a resource that allows create on update", func(ctx SpecContext) { + By("Creating a new lease with update") + lease := &coordinationv1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-lease", Namespace: "ns2", - } - obj := &corev1.ConfigMap{} - err = cl.Get(nil, namespacedName, obj) - Expect(err).To(BeNil()) - Expect(obj).To(Equal(cm)) - }) + }, + Spec: coordinationv1.LeaseSpec{}, + } + Expect(cl.Create(ctx, lease)).To(Succeed()) + + By("Getting the lease") + namespacedName := types.NamespacedName{ + Name: lease.Name, + Namespace: lease.Namespace, + } + obj := &coordinationv1.Lease{} + Expect(cl.Get(ctx, namespacedName, obj)).To(Succeed()) + Expect(obj).To(Equal(lease)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal("1")) }) - It("should be able to Patch", func() { - By("Patching a deployment") - mergePatch, err := json.Marshal(map[string]interface{}{ - "metadata": map[string]interface{}{ - "annotations": map[string]interface{}{ - "foo": "bar", - }, + It("should reject create on update for a resource that does not allow create on update", func(ctx SpecContext) { + By("Attemping to create a new configmap with update") + newcm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "different-test-cm", + Namespace: "ns2", }, - }) - Expect(err).NotTo(HaveOccurred()) - err = cl.Patch(nil, dep, client.ConstantPatch(types.StrategicMergePatchType, mergePatch)) - Expect(err).NotTo(HaveOccurred()) + Data: map[string]string{ + "test-key": "new-value", + }, + } + Expect(cl.Update(ctx, newcm)).NotTo(Succeed()) + }) - By("Getting the patched deployment") + It("should reject updates with non-matching ResourceVersion", func(ctx SpecContext) { + By("Updating a new configmap") + newcm := 
&corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "ns2", + ResourceVersion: "1", + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Update(ctx, newcm) + Expect(apierrors.IsConflict(err)).To(BeTrue()) + + By("Getting the configmap") namespacedName := types.NamespacedName{ - Name: "test-deployment", - Namespace: "ns1", + Name: "test-cm", + Namespace: "ns2", } - obj := &appsv1.Deployment{} - err = cl.Get(nil, namespacedName, obj) - Expect(err).NotTo(HaveOccurred()) - Expect(obj.Annotations["foo"]).To(Equal("bar")) + obj := &corev1.ConfigMap{} + err = cl.Get(ctx, namespacedName, obj) + Expect(err).ToNot(HaveOccurred()) + Expect(obj).To(Equal(cm)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal(trackerAddResourceVersion)) }) - } - Context("with default scheme.Scheme", func() { - BeforeEach(func(done Done) { - cl = NewFakeClient(dep, dep2, cm) - close(done) + It("should reject apply with non-matching ResourceVersion", func(ctx SpecContext) { + cl := NewClientBuilder().WithRuntimeObjects(cm).Build() + applyCM := corev1applyconfigurations.ConfigMap(cm.Name, cm.Namespace).WithResourceVersion("0") + err := cl.Apply(ctx, applyCM, client.FieldOwner("test")) + Expect(apierrors.IsConflict(err)).To(BeTrue()) + + obj := &corev1.ConfigMap{} + err = cl.Get(ctx, client.ObjectKeyFromObject(cm), obj) + Expect(err).ToNot(HaveOccurred()) + Expect(obj).To(Equal(cm)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal(trackerAddResourceVersion)) }) - AssertClientBehavior() - }) - Context("with given scheme", func() { - BeforeEach(func(done Done) { - scheme := runtime.NewScheme() - Expect(corev1.AddToScheme(scheme)).To(Succeed()) - Expect(appsv1.AddToScheme(scheme)).To(Succeed()) - cl = NewFakeClientWithScheme(scheme, dep, dep2, cm) - close(done) + It("should reject Delete with a mismatched ResourceVersion", func(ctx SpecContext) { + bogusRV := "bogus" + By("Deleting with a mismatched ResourceVersion Precondition") + err := 
cl.Delete(ctx, dep, client.Preconditions{ResourceVersion: &bogusRV}) + Expect(apierrors.IsConflict(err)).To(BeTrue()) + + list := &appsv1.DeploymentList{} + err = cl.List(ctx, list, client.InNamespace("ns1")) + Expect(err).ToNot(HaveOccurred()) + Expect(list.Items).To(HaveLen(2)) + Expect(list.Items).To(ConsistOf(*dep, *dep2)) }) - AssertClientBehavior() + + It("should successfully Delete with a matching ResourceVersion", func(ctx SpecContext) { + goodRV := trackerAddResourceVersion + By("Deleting with a matching ResourceVersion Precondition") + err := cl.Delete(ctx, dep, client.Preconditions{ResourceVersion: &goodRV}) + Expect(err).ToNot(HaveOccurred()) + + list := &appsv1.DeploymentList{} + err = cl.List(ctx, list, client.InNamespace("ns1")) + Expect(err).ToNot(HaveOccurred()) + Expect(list.Items).To(HaveLen(1)) + Expect(list.Items).To(ConsistOf(*dep2)) + }) + + It("should be able to Delete with no ResourceVersion Precondition", func(ctx SpecContext) { + By("Deleting a deployment") + err := cl.Delete(ctx, dep) + Expect(err).ToNot(HaveOccurred()) + + By("Listing all deployments in the namespace") + list := &appsv1.DeploymentList{} + err = cl.List(ctx, list, client.InNamespace("ns1")) + Expect(err).ToNot(HaveOccurred()) + Expect(list.Items).To(HaveLen(1)) + Expect(list.Items).To(ConsistOf(*dep2)) + }) + + It("should be able to Delete with no opts even if object's ResourceVersion doesn't match server", func(ctx SpecContext) { + By("Deleting a deployment") + depCopy := dep.DeepCopy() + depCopy.ResourceVersion = "bogus" + err := cl.Delete(ctx, depCopy) + Expect(err).ToNot(HaveOccurred()) + + By("Listing all deployments in the namespace") + list := &appsv1.DeploymentList{} + err = cl.List(ctx, list, client.InNamespace("ns1")) + Expect(err).ToNot(HaveOccurred()) + Expect(list.Items).To(HaveLen(1)) + Expect(list.Items).To(ConsistOf(*dep2)) + }) + + It("should handle finalizers in Apply ", func(ctx SpecContext) { + cl := client.WithFieldOwner(cl, "test") + + By("Creating 
the object with a finalizer") + cm := corev1applyconfigurations.ConfigMap("test-cm", "delete-with-finalizers"). + WithFinalizers("finalizers.sigs.k8s.io/test") + Expect(cl.Apply(ctx, cm)).To(Succeed()) + + By("Deleting the object") + obj := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ + Name: *cm.Name, + Namespace: *cm.Namespace, + }} + Expect(cl.Delete(ctx, obj)).NotTo(HaveOccurred()) + + By("Getting the object") + Expect(cl.Get(ctx, client.ObjectKeyFromObject(obj), obj)).NotTo(HaveOccurred()) + Expect(obj.DeletionTimestamp).NotTo(BeNil()) + + By("Removing the finalizer through SSA") + cm.ResourceVersion = nil + cm.Finalizers = nil + Expect(cl.Apply(ctx, cm)).NotTo(HaveOccurred()) + + By("Getting the object") + err := cl.Get(ctx, client.ObjectKeyFromObject(obj), &corev1.ConfigMap{}) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should handle finalizers on Update", func(ctx SpecContext) { + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "delete-with-finalizers", + } + By("Updating a new object") + newObj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + Finalizers: []string{"finalizers.sigs.k8s.io/test"}, + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Create(ctx, newObj) + Expect(err).ToNot(HaveOccurred()) + + By("Deleting the object") + err = cl.Delete(ctx, newObj) + Expect(err).ToNot(HaveOccurred()) + + By("Getting the object") + obj := &corev1.ConfigMap{} + err = cl.Get(ctx, namespacedName, obj) + Expect(err).ToNot(HaveOccurred()) + Expect(obj.DeletionTimestamp).NotTo(BeNil()) + + By("Removing the finalizer") + obj.Finalizers = []string{} + err = cl.Update(ctx, obj) + Expect(err).ToNot(HaveOccurred()) + + By("Getting the object") + obj = &corev1.ConfigMap{} + err = cl.Get(ctx, namespacedName, obj) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should reject changes to deletionTimestamp on Update", 
func(ctx SpecContext) { + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "reject-with-deletiontimestamp", + } + By("Updating a new object") + newObj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Create(ctx, newObj) + Expect(err).ToNot(HaveOccurred()) + + By("Getting the object") + obj := &corev1.ConfigMap{} + err = cl.Get(ctx, namespacedName, obj) + Expect(err).ToNot(HaveOccurred()) + Expect(obj.DeletionTimestamp).To(BeNil()) + + By("Adding deletionTimestamp") + now := metav1.Now() + obj.DeletionTimestamp = &now + err = cl.Update(ctx, obj) + Expect(err).To(HaveOccurred()) + + By("Deleting the object") + err = cl.Delete(ctx, newObj) + Expect(err).ToNot(HaveOccurred()) + + By("Changing the deletionTimestamp to new value") + obj = &corev1.ConfigMap{} + t := metav1.NewTime(time.Now().Add(time.Second)) + obj.DeletionTimestamp = &t + err = cl.Update(ctx, obj) + Expect(err).To(HaveOccurred()) + + By("Removing deletionTimestamp") + obj.DeletionTimestamp = nil + err = cl.Update(ctx, obj) + Expect(err).To(HaveOccurred()) + + }) + + It("should be able to Delete a Collection", func(ctx SpecContext) { + By("Deleting a deploymentList") + err := cl.DeleteAllOf(ctx, &appsv1.Deployment{}, client.InNamespace("ns1")) + Expect(err).ToNot(HaveOccurred()) + + By("Listing all deployments in the namespace") + list := &appsv1.DeploymentList{} + err = cl.List(ctx, list, client.InNamespace("ns1")) + Expect(err).ToNot(HaveOccurred()) + Expect(list.Items).To(BeEmpty()) + }) + + It("should handle finalizers deleting a collection", func(ctx SpecContext) { + for i := 0; i < 5; i++ { + namespacedName := types.NamespacedName{ + Name: fmt.Sprintf("test-cm-%d", i), + Namespace: "delete-collection-with-finalizers", + } + By("Creating a new object") + newObj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
namespacedName.Name, + Namespace: namespacedName.Namespace, + Finalizers: []string{"finalizers.sigs.k8s.io/test"}, + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Create(ctx, newObj) + Expect(err).ToNot(HaveOccurred()) + } + + By("Deleting the object") + err := cl.DeleteAllOf(ctx, &corev1.ConfigMap{}, client.InNamespace("delete-collection-with-finalizers")) + Expect(err).ToNot(HaveOccurred()) + + configmaps := corev1.ConfigMapList{} + err = cl.List(ctx, &configmaps, client.InNamespace("delete-collection-with-finalizers")) + Expect(err).ToNot(HaveOccurred()) + + Expect(configmaps.Items).To(HaveLen(5)) + for _, cm := range configmaps.Items { + Expect(cm.DeletionTimestamp).NotTo(BeNil()) + } + }) + + It("should be able to watch", func(ctx SpecContext) { + By("Creating a watch") + objWatch, err := cl.Watch(ctx, &corev1.ServiceList{}) + Expect(err).NotTo(HaveOccurred()) + + defer objWatch.Stop() + + go func() { + defer GinkgoRecover() + // It is likely starting a new goroutine is slower than progressing + // in the outer routine, sleep to make sure this is always true + time.Sleep(100 * time.Millisecond) + + err := cl.Create(ctx, &corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "for-watch"}}) + Expect(err).ToNot(HaveOccurred()) + }() + + event, ok := <-objWatch.ResultChan() + Expect(ok).To(BeTrue()) + Expect(event.Type).To(Equal(watch.Added)) + + service, ok := event.Object.(*corev1.Service) + Expect(ok).To(BeTrue()) + Expect(service.Name).To(Equal("for-watch")) + }) + + Context("with the DryRun option", func() { + It("should not create a new object", func(ctx SpecContext) { + By("Creating a new configmap with DryRun") + newcm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-test-cm", + Namespace: "ns2", + }, + } + err := cl.Create(ctx, newcm, client.DryRunAll) + Expect(err).ToNot(HaveOccurred()) + + By("Getting the new configmap") + namespacedName := types.NamespacedName{ + Name: "new-test-cm", 
+ Namespace: "ns2", + } + obj := &corev1.ConfigMap{} + err = cl.Get(ctx, namespacedName, obj) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + Expect(obj).NotTo(Equal(newcm)) + }) + + It("should not Update the object", func(ctx SpecContext) { + By("Updating a new configmap with DryRun") + newcm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "ns2", + ResourceVersion: "1", + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Update(ctx, newcm, client.DryRunAll) + Expect(err).ToNot(HaveOccurred()) + + By("Getting the new configmap") + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "ns2", + } + obj := &corev1.ConfigMap{} + err = cl.Get(ctx, namespacedName, obj) + Expect(err).ToNot(HaveOccurred()) + Expect(obj).To(Equal(cm)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal(trackerAddResourceVersion)) + }) + + It("Should not Delete the object", func(ctx SpecContext) { + By("Deleting a configmap with DryRun with Delete()") + err := cl.Delete(ctx, cm, client.DryRunAll) + Expect(err).ToNot(HaveOccurred()) + + By("Deleting a configmap with DryRun with DeleteAllOf()") + err = cl.DeleteAllOf(ctx, cm, client.DryRunAll) + Expect(err).ToNot(HaveOccurred()) + + By("Getting the configmap") + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "ns2", + } + obj := &corev1.ConfigMap{} + err = cl.Get(ctx, namespacedName, obj) + Expect(err).ToNot(HaveOccurred()) + Expect(obj).To(Equal(cm)) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal(trackerAddResourceVersion)) + }) + }) + + It("should be able to Patch", func(ctx SpecContext) { + By("Patching a deployment") + mergePatch, err := json.Marshal(map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + "foo": "bar", + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + err = cl.Patch(ctx, dep, client.RawPatch(types.StrategicMergePatchType, 
mergePatch)) + Expect(err).NotTo(HaveOccurred()) + + By("Getting the patched deployment") + namespacedName := types.NamespacedName{ + Name: "test-deployment", + Namespace: "ns1", + } + obj := &appsv1.Deployment{} + err = cl.Get(ctx, namespacedName, obj) + Expect(err).NotTo(HaveOccurred()) + Expect(obj.Annotations["foo"]).To(Equal("bar")) + Expect(obj.ObjectMeta.ResourceVersion).To(Equal("1000")) + }) + + It("should ignore deletionTimestamp without finalizer on Create", func(ctx SpecContext) { + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "ignore-deletiontimestamp", + } + By("Creating a new object") + now := metav1.Now() + newObj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + Finalizers: []string{"finalizers.sigs.k8s.io/test"}, + DeletionTimestamp: &now, + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + + err := cl.Create(ctx, newObj) + Expect(err).ToNot(HaveOccurred()) + + By("Getting the object") + obj := &corev1.ConfigMap{} + err = cl.Get(ctx, namespacedName, obj) + Expect(err).ToNot(HaveOccurred()) + Expect(obj.DeletionTimestamp).To(BeNil()) + + }) + + It("should reject deletionTimestamp without finalizers on Build", func(ctx SpecContext) { + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "reject-deletiontimestamp-no-finalizers", + } + By("Build with a new object without finalizer") + now := metav1.Now() + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + DeletionTimestamp: &now, + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + + Expect(func() { NewClientBuilder().WithObjects(obj).Build() }).To(Panic()) + + By("Build with a new object with finalizer") + newObj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + Finalizers: 
[]string{"finalizers.sigs.k8s.io/test"}, + DeletionTimestamp: &now, + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + + cl := NewClientBuilder().WithObjects(newObj).Build() + + By("Getting the object") + obj = &corev1.ConfigMap{} + err := cl.Get(ctx, namespacedName, obj) + Expect(err).ToNot(HaveOccurred()) + + }) + + It("should reject changes to deletionTimestamp on Patch", func(ctx SpecContext) { + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "reject-deletiontimestamp", + } + By("Creating a new object") + now := metav1.Now() + newObj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + Finalizers: []string{"finalizers.sigs.k8s.io/test"}, + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Create(ctx, newObj) + Expect(err).ToNot(HaveOccurred()) + + By("Add a deletionTimestamp") + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + Finalizers: []string{}, + DeletionTimestamp: &now, + }, + } + err = cl.Patch(ctx, obj, client.MergeFrom(newObj)) + Expect(err).To(HaveOccurred()) + + By("Deleting the object") + err = cl.Delete(ctx, newObj) + Expect(err).ToNot(HaveOccurred()) + + By("Getting the object") + obj = &corev1.ConfigMap{} + err = cl.Get(ctx, namespacedName, obj) + Expect(err).ToNot(HaveOccurred()) + Expect(obj.DeletionTimestamp).NotTo(BeNil()) + + By("Changing the deletionTimestamp to new value") + newObj = &corev1.ConfigMap{} + t := metav1.NewTime(time.Now().Add(time.Second)) + newObj.DeletionTimestamp = &t + err = cl.Patch(ctx, newObj, client.MergeFrom(obj)) + Expect(err).To(HaveOccurred()) + + By("Removing deletionTimestamp") + newObj = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + DeletionTimestamp: nil, + }, + } + err = cl.Patch(ctx, newObj, 
client.MergeFrom(obj)) + Expect(err).To(HaveOccurred()) + + }) + + It("should handle finalizers on Patch", func(ctx SpecContext) { + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "delete-with-finalizers", + } + By("Creating a new object") + newObj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + Finalizers: []string{"finalizers.sigs.k8s.io/test"}, + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Create(ctx, newObj) + Expect(err).ToNot(HaveOccurred()) + + By("Deleting the object") + err = cl.Delete(ctx, newObj) + Expect(err).ToNot(HaveOccurred()) + + By("Removing the finalizer") + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + Finalizers: []string{}, + }, + } + err = cl.Patch(ctx, obj, client.MergeFrom(newObj)) + Expect(err).ToNot(HaveOccurred()) + + By("Getting the object") + obj = &corev1.ConfigMap{} + err = cl.Get(ctx, namespacedName, obj) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should remove finalizers of the object on Patch", func(ctx SpecContext) { + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "patch-finalizers-in-obj", + } + By("Creating a new object") + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + Finalizers: []string{"finalizers.sigs.k8s.io/test"}, + }, + Data: map[string]string{ + "test-key": "new-value", + }, + } + err := cl.Create(ctx, obj) + Expect(err).ToNot(HaveOccurred()) + + By("Removing the finalizer") + mergePatch, err := json.Marshal(map[string]interface{}{ + "metadata": map[string]interface{}{ + "$deleteFromPrimitiveList/finalizers": []string{ + "finalizers.sigs.k8s.io/test", + }, + }, + }) + Expect(err).ToNot(HaveOccurred()) + err = cl.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType, 
mergePatch)) + Expect(err).ToNot(HaveOccurred()) + + By("Check the finalizer has been removed in the object") + Expect(obj.Finalizers).To(BeEmpty()) + + By("Check the finalizer has been removed in client") + newObj := &corev1.ConfigMap{} + err = cl.Get(ctx, namespacedName, newObj) + Expect(err).ToNot(HaveOccurred()) + Expect(newObj.Finalizers).To(BeEmpty()) + }) + + } + + Context("with default scheme.Scheme", func() { + BeforeEach(func() { + cl = NewClientBuilder(). + WithObjects(dep, dep2, cm). + Build() + }) + AssertClientWithoutIndexBehavior() + }) + + Context("with given scheme", func() { + BeforeEach(func() { + scheme := runtime.NewScheme() + Expect(corev1.AddToScheme(scheme)).To(Succeed()) + Expect(appsv1.AddToScheme(scheme)).To(Succeed()) + Expect(coordinationv1.AddToScheme(scheme)).To(Succeed()) + cl = NewClientBuilder(). + WithScheme(scheme). + WithObjects(cm). + WithLists(&appsv1.DeploymentList{Items: []appsv1.Deployment{*dep, *dep2}}). + Build() + }) + AssertClientWithoutIndexBehavior() + }) + + Context("with Indexes", func() { + depReplicasIndexer := func(obj client.Object) []string { + dep, ok := obj.(*appsv1.Deployment) + if !ok { + panic(fmt.Errorf("indexer function for type %T's spec.replicas field received"+ + " object of type %T, this should never happen", appsv1.Deployment{}, obj)) + } + indexVal := "" + if dep.Spec.Replicas != nil { + indexVal = strconv.Itoa(int(*dep.Spec.Replicas)) + } + return []string{indexVal} + } + + depStrategyTypeIndexer := func(obj client.Object) []string { + dep, ok := obj.(*appsv1.Deployment) + if !ok { + panic(fmt.Errorf("indexer function for type %T's spec.strategy.type field received"+ + " object of type %T, this should never happen", appsv1.Deployment{}, obj)) + } + return []string{string(dep.Spec.Strategy.Type)} + } + + var cb *ClientBuilder + BeforeEach(func() { + cb = NewClientBuilder(). + WithObjects(dep, dep2, cm). 
+ WithIndex(&appsv1.Deployment{}, "spec.replicas", depReplicasIndexer) + }) + + Context("client has just one Index", func() { + BeforeEach(func() { cl = cb.Build() }) + + Context("behavior that doesn't use an Index", func() { + AssertClientWithoutIndexBehavior() + }) + + Context("filtered List using field selector", func() { + It("errors when there's no Index for the GroupVersionResource", func(ctx SpecContext) { + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("key", "val"), + } + list := &corev1.ConfigMapList{} + err := cl.List(ctx, list, listOpts) + Expect(err).To(HaveOccurred()) + Expect(list.Items).To(BeEmpty()) + }) + + It("errors when there's no Index for the GroupVersionResource with UnstructuredList", func(ctx SpecContext) { + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("key", "val"), + } + list := &unstructured.UnstructuredList{} + list.SetAPIVersion("v1") + list.SetKind("ConfigMapList") + err := cl.List(ctx, list, listOpts) + Expect(err).To(HaveOccurred()) + Expect(list.GroupVersionKind().GroupVersion().String()).To(Equal("v1")) + Expect(list.GetKind()).To(Equal("ConfigMapList")) + Expect(list.Items).To(BeEmpty()) + }) + + It("errors when there's no Index matching the field name", func(ctx SpecContext) { + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("spec.paused", "false"), + } + list := &appsv1.DeploymentList{} + err := cl.List(ctx, list, listOpts) + Expect(err).To(HaveOccurred()) + Expect(list.Items).To(BeEmpty()) + }) + + It("errors when field selector uses two requirements", func(ctx SpecContext) { + listOpts := &client.ListOptions{ + FieldSelector: fields.AndSelectors( + fields.OneTermEqualSelector("spec.replicas", "1"), + fields.OneTermEqualSelector("spec.strategy.type", string(appsv1.RecreateDeploymentStrategyType)), + )} + list := &appsv1.DeploymentList{} + err := cl.List(ctx, list, listOpts) + Expect(err).To(HaveOccurred()) + 
Expect(list.Items).To(BeEmpty()) + }) + + It("returns two deployments that match the only field selector requirement", func(ctx SpecContext) { + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("spec.replicas", "1"), + } + list := &appsv1.DeploymentList{} + Expect(cl.List(ctx, list, listOpts)).To(Succeed()) + Expect(list.Items).To(ConsistOf(*dep, *dep2)) + }) + + It("returns no object because no object matches the only field selector requirement", func(ctx SpecContext) { + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("spec.replicas", "2"), + } + list := &appsv1.DeploymentList{} + Expect(cl.List(ctx, list, listOpts)).To(Succeed()) + Expect(list.Items).To(BeEmpty()) + }) + + It("returns deployment that matches both the field and label selectors", func(ctx SpecContext) { + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("spec.replicas", "1"), + LabelSelector: labels.SelectorFromSet(dep2.Labels), + } + list := &appsv1.DeploymentList{} + Expect(cl.List(ctx, list, listOpts)).To(Succeed()) + Expect(list.Items).To(ConsistOf(*dep2)) + }) + + It("returns no object even if field selector matches because label selector doesn't", func(ctx SpecContext) { + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("spec.replicas", "1"), + LabelSelector: labels.Nothing(), + } + list := &appsv1.DeploymentList{} + Expect(cl.List(ctx, list, listOpts)).To(Succeed()) + Expect(list.Items).To(BeEmpty()) + }) + + It("returns no object even if label selector matches because field selector doesn't", func(ctx SpecContext) { + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("spec.replicas", "2"), + LabelSelector: labels.Everything(), + } + list := &appsv1.DeploymentList{} + Expect(cl.List(ctx, list, listOpts)).To(Succeed()) + Expect(list.Items).To(BeEmpty()) + }) + + It("supports adding an index at runtime", func(ctx SpecContext) { + listOpts := 
&client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", "test-deployment-2"), + } + list := &appsv1.DeploymentList{} + err := cl.List(ctx, list, listOpts) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("no index with name metadata.name has been registered")) + + err = AddIndex(cl, &appsv1.Deployment{}, "metadata.name", func(obj client.Object) []string { + return []string{obj.GetName()} + }) + Expect(err).To(Succeed()) + + Expect(cl.List(ctx, list, listOpts)).To(Succeed()) + Expect(list.Items).To(ConsistOf(*dep2)) + }) + + It("Is not a datarace to add and use indexes in parallel", func(ctx SpecContext) { + wg := sync.WaitGroup{} + wg.Add(2) + + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("spec.replicas", "2"), + } + go func() { + defer wg.Done() + defer GinkgoRecover() + Expect(cl.List(ctx, &appsv1.DeploymentList{}, listOpts)).To(Succeed()) + }() + go func() { + defer wg.Done() + defer GinkgoRecover() + err := AddIndex(cl, &appsv1.Deployment{}, "metadata.name", func(obj client.Object) []string { + return []string{obj.GetName()} + }) + Expect(err).To(Succeed()) + }() + wg.Wait() + }) + }) + }) + + Context("client has two Indexes", func() { + BeforeEach(func() { + cl = cb.WithIndex(&appsv1.Deployment{}, "spec.strategy.type", depStrategyTypeIndexer).Build() + }) + + Context("behavior that doesn't use an Index", func() { + AssertClientWithoutIndexBehavior() + }) + + Context("filtered List using field selector", func() { + It("uses the second index to retrieve the indexed objects when there are matches", func(ctx SpecContext) { + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("spec.strategy.type", string(appsv1.RecreateDeploymentStrategyType)), + } + list := &appsv1.DeploymentList{} + Expect(cl.List(ctx, list, listOpts)).To(Succeed()) + Expect(list.Items).To(ConsistOf(*dep)) + }) + + It("uses the second index to retrieve the indexed objects when there 
are no matches", func(ctx SpecContext) { + listOpts := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("spec.strategy.type", string(appsv1.RollingUpdateDeploymentStrategyType)), + } + list := &appsv1.DeploymentList{} + Expect(cl.List(ctx, list, listOpts)).To(Succeed()) + Expect(list.Items).To(BeEmpty()) + }) + + It("no error when field selector uses two requirements", func(ctx SpecContext) { + listOpts := &client.ListOptions{ + FieldSelector: fields.AndSelectors( + fields.OneTermEqualSelector("spec.replicas", "1"), + fields.OneTermEqualSelector("spec.strategy.type", string(appsv1.RecreateDeploymentStrategyType)), + )} + list := &appsv1.DeploymentList{} + Expect(cl.List(ctx, list, listOpts)).To(Succeed()) + Expect(list.Items).To(ConsistOf(*dep)) + }) + }) + }) + }) + + It("should set the ResourceVersion to 999 when adding an object to the tracker", func(ctx SpecContext) { + cl := NewClientBuilder().WithObjects(&corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "cm"}}).Build() + + retrieved := &corev1.Secret{} + Expect(cl.Get(ctx, types.NamespacedName{Name: "cm"}, retrieved)).To(Succeed()) + + reference := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cm", + ResourceVersion: "999", + }, + } + Expect(retrieved).To(Equal(reference)) + }) + + It("should be able to build with given tracker and get resource", func(ctx SpecContext) { + clientSet := fake.NewSimpleClientset(dep) + cl := NewClientBuilder().WithRuntimeObjects(dep2).WithObjectTracker(clientSet.Tracker()).Build() + + By("Getting a deployment") + namespacedName := types.NamespacedName{ + Name: "test-deployment", + Namespace: "ns1", + } + obj := &appsv1.Deployment{} + err := cl.Get(ctx, namespacedName, obj) + Expect(err).ToNot(HaveOccurred()) + Expect(obj).To(BeComparableTo(dep)) + + By("Getting a deployment from clientSet") + csDep2, err := clientSet.AppsV1().Deployments("ns1").Get(ctx, "test-deployment-2", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + 
Expect(csDep2).To(Equal(dep2)) + + By("Getting a new deployment") + namespacedName3 := types.NamespacedName{ + Name: "test-deployment-3", + Namespace: "ns1", + } + + dep3 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment-3", + Namespace: "ns1", + Labels: map[string]string{ + "test-label": "label-value", + }, + ResourceVersion: trackerAddResourceVersion, + }, + } + + _, err = clientSet.AppsV1().Deployments("ns1").Create(ctx, dep3, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + + obj = &appsv1.Deployment{} + err = cl.Get(ctx, namespacedName3, obj) + Expect(err).ToNot(HaveOccurred()) + Expect(obj).To(BeComparableTo(dep3)) + }) + + It("should not change the status of typed objects that have a status subresource on update", func(ctx SpecContext) { + obj := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + }, + } + cl := NewClientBuilder().WithStatusSubresource(obj).WithObjects(obj).Build() + + obj.Status.Phase = "Running" + Expect(cl.Update(ctx, obj)).To(Succeed()) + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed()) + + Expect(obj.Status).To(BeEquivalentTo(corev1.PodStatus{})) + }) + + It("should return a conflict error when an incorrect RV is used on status update", func(ctx SpecContext) { + obj := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node", + ResourceVersion: trackerAddResourceVersion, + }, + } + cl := NewClientBuilder().WithStatusSubresource(obj).WithObjects(obj).Build() + + obj.Status.Phase = corev1.NodeRunning + obj.ResourceVersion = "invalid" + err := cl.Status().Update(ctx, obj) + Expect(apierrors.IsConflict(err)).To(BeTrue()) + }) + + It("should not change non-status field of typed objects that have a status subresource on status update", func(ctx SpecContext) { + obj := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node", + }, + Spec: corev1.NodeSpec{ + PodCIDR: "old-cidr", + }, + Status: corev1.NodeStatus{ + NodeInfo: corev1.NodeSystemInfo{ + 
MachineID: "machine-id", + }, + }, + } + cl := NewClientBuilder().WithStatusSubresource(obj).WithObjects(obj).Build() + objOriginal := obj.DeepCopy() + + obj.Spec.PodCIDR = cidrFromStatusUpdate + obj.Annotations = map[string]string{ + "some-annotation-key": "some-annotation-value", + } + obj.Labels = map[string]string{ + "some-label-key": "some-label-value", + } + + obj.Status.NodeInfo.MachineID = machineIDFromStatusUpdate + Expect(cl.Status().Update(ctx, obj)).NotTo(HaveOccurred()) + + actual := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: obj.Name}} + Expect(cl.Get(ctx, client.ObjectKeyFromObject(actual), actual)).NotTo(HaveOccurred()) + + objOriginal.APIVersion = actual.APIVersion + objOriginal.Kind = actual.Kind + objOriginal.ResourceVersion = actual.ResourceVersion + objOriginal.Status.NodeInfo.MachineID = machineIDFromStatusUpdate + Expect(cmp.Diff(objOriginal, actual)).To(BeEmpty()) + }) + + It("should be able to update an object after updating an object's status", func(ctx SpecContext) { + obj := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node", + }, + Spec: corev1.NodeSpec{ + PodCIDR: "old-cidr", + }, + Status: corev1.NodeStatus{ + NodeInfo: corev1.NodeSystemInfo{ + MachineID: "machine-id", + }, + }, + } + cl := NewClientBuilder().WithStatusSubresource(obj).WithObjects(obj).Build() + expectedObj := obj.DeepCopy() + + obj.Status.NodeInfo.MachineID = machineIDFromStatusUpdate + Expect(cl.Status().Update(ctx, obj)).NotTo(HaveOccurred()) + + obj.Annotations = map[string]string{ + "some-annotation-key": "some", + } + expectedObj.Annotations = map[string]string{ + "some-annotation-key": "some", + } + Expect(cl.Update(ctx, obj)).NotTo(HaveOccurred()) + + actual := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: obj.Name}} + Expect(cl.Get(ctx, client.ObjectKeyFromObject(actual), actual)).NotTo(HaveOccurred()) + + expectedObj.APIVersion = actual.APIVersion + expectedObj.Kind = actual.Kind + expectedObj.ResourceVersion = actual.ResourceVersion + 
expectedObj.Status.NodeInfo.MachineID = machineIDFromStatusUpdate + Expect(cmp.Diff(expectedObj, actual)).To(BeEmpty()) + }) + + It("should be able to update an object's status after updating an object", func(ctx SpecContext) { + obj := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node", + }, + Spec: corev1.NodeSpec{ + PodCIDR: "old-cidr", + }, + Status: corev1.NodeStatus{ + NodeInfo: corev1.NodeSystemInfo{ + MachineID: "machine-id", + }, + }, + } + cl := NewClientBuilder().WithStatusSubresource(obj).WithObjects(obj).Build() + expectedObj := obj.DeepCopy() + + obj.Annotations = map[string]string{ + "some-annotation-key": "some", + } + expectedObj.Annotations = map[string]string{ + "some-annotation-key": "some", + } + Expect(cl.Update(ctx, obj)).NotTo(HaveOccurred()) + + obj.Spec.PodCIDR = cidrFromStatusUpdate + obj.Status.NodeInfo.MachineID = machineIDFromStatusUpdate + Expect(cl.Status().Update(ctx, obj)).NotTo(HaveOccurred()) + + actual := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: obj.Name}} + Expect(cl.Get(ctx, client.ObjectKeyFromObject(actual), actual)).NotTo(HaveOccurred()) + + expectedObj.APIVersion = actual.APIVersion + expectedObj.Kind = actual.Kind + expectedObj.ResourceVersion = actual.ResourceVersion + expectedObj.Status.NodeInfo.MachineID = machineIDFromStatusUpdate + Expect(cmp.Diff(expectedObj, actual)).To(BeEmpty()) + }) + + It("Should only override status fields of typed objects that have a status subresource on status update", func(ctx SpecContext) { + obj := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node", + }, + Spec: corev1.NodeSpec{ + PodCIDR: "old-cidr", + }, + Status: corev1.NodeStatus{ + NodeInfo: corev1.NodeSystemInfo{ + MachineID: "machine-id", + }, + }, + } + cl := NewClientBuilder().WithStatusSubresource(obj).WithObjects(obj).Build() + objOriginal := obj.DeepCopy() + + obj.Status.Phase = corev1.NodeRunning + Expect(cl.Status().Update(ctx, obj)).NotTo(HaveOccurred()) + + actual := &corev1.Node{ObjectMeta: 
metav1.ObjectMeta{Name: obj.Name}} + Expect(cl.Get(ctx, client.ObjectKeyFromObject(actual), actual)).NotTo(HaveOccurred()) + + objOriginal.APIVersion = actual.APIVersion + objOriginal.Kind = actual.Kind + objOriginal.ResourceVersion = actual.ResourceVersion + Expect(cmp.Diff(objOriginal, actual)).ToNot(BeEmpty()) + Expect(objOriginal.Status.NodeInfo.MachineID).To(Equal(actual.Status.NodeInfo.MachineID)) + Expect(objOriginal.Status.Phase).ToNot(Equal(actual.Status.Phase)) + }) + + It("should be able to change typed objects that have a scale subresource on patch", func(ctx SpecContext) { + obj := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deploy", + }, + } + cl := NewClientBuilder().WithObjects(obj).Build() + objOriginal := obj.DeepCopy() + + patch := []byte(fmt.Sprintf(`{"spec":{"replicas":%d}}`, 2)) + Expect(cl.SubResource("scale").Patch(ctx, obj, client.RawPatch(types.MergePatchType, patch))).NotTo(HaveOccurred()) + + actual := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: obj.Name}} + Expect(cl.Get(ctx, client.ObjectKeyFromObject(actual), actual)).To(Succeed()) + + objOriginal.APIVersion = actual.APIVersion + objOriginal.Kind = actual.Kind + objOriginal.ResourceVersion = actual.ResourceVersion + objOriginal.Spec.Replicas = ptr.To(int32(2)) + Expect(cmp.Diff(objOriginal, actual)).To(BeEmpty()) + }) + + It("should not change the status of typed objects that have a status subresource on patch", func(ctx SpecContext) { + obj := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node", + }, + } + Expect(cl.Create(ctx, obj)).To(Succeed()) + original := obj.DeepCopy() + + obj.Status.Phase = "Running" + Expect(cl.Patch(ctx, obj, client.MergeFrom(original))).To(Succeed()) + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed()) + + Expect(obj.Status).To(BeEquivalentTo(corev1.PodStatus{})) + }) + + It("should not change non-status field of typed objects that have a status subresource on status patch", func(ctx SpecContext) 
{ + obj := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node", + }, + Spec: corev1.NodeSpec{ + PodCIDR: "old-cidr", + }, + } + cl := NewClientBuilder().WithStatusSubresource(obj).WithObjects(obj).Build() + objOriginal := obj.DeepCopy() + + obj.Spec.PodCIDR = cidrFromStatusUpdate + obj.Status.NodeInfo.MachineID = "machine-id" + Expect(cl.Status().Patch(ctx, obj, client.MergeFrom(objOriginal))).NotTo(HaveOccurred()) + + actual := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: obj.Name}} + Expect(cl.Get(ctx, client.ObjectKeyFromObject(actual), actual)).NotTo(HaveOccurred()) + + objOriginal.APIVersion = actual.APIVersion + objOriginal.Kind = actual.Kind + objOriginal.ResourceVersion = actual.ResourceVersion + objOriginal.Status.NodeInfo.MachineID = "machine-id" + Expect(cmp.Diff(objOriginal, actual)).To(BeEmpty()) + }) + + It("should not change the status of objects with status subresource when creating through apply ", func(ctx SpecContext) { + obj := corev1applyconfigurations. + Pod("node", ""). + WithStatus( + corev1applyconfigurations.PodStatus().WithPhase("Running"), + ) + + cl := NewClientBuilder().WithStatusSubresource(&corev1.Pod{}).Build() + Expect(cl.Apply(ctx, obj, client.FieldOwner("test"))).To(Succeed()) + + p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: *obj.Name}} + Expect(cl.Get(ctx, client.ObjectKeyFromObject(p), p)).To(Succeed()) + + Expect(p.Status).To(BeComparableTo(corev1.PodStatus{})) + }) + + It("should not change the status of objects with status subresource when updating through apply ", func(ctx SpecContext) { + + cl := NewClientBuilder().WithStatusSubresource(&corev1.Pod{}).Build() + pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod"}} + Expect(cl.Create(ctx, pod)).NotTo(HaveOccurred()) + + obj := corev1applyconfigurations. + Pod(pod.Name, ""). 
+ WithStatus( + corev1applyconfigurations.PodStatus().WithPhase("Running"), + ) + Expect(cl.Apply(ctx, obj, client.FieldOwner("test"))).To(Succeed()) + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(pod), pod)).To(Succeed()) + + Expect(pod.Status).To(BeComparableTo(corev1.PodStatus{})) + }) + + It("should only change status on status apply", func(ctx SpecContext) { + initial := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node", + }, + Spec: corev1.NodeSpec{ + PodCIDR: "old-cidr", + }, + } + cl := NewClientBuilder().WithStatusSubresource(&corev1.Node{}).WithObjects(initial).Build() + + ac := corev1applyconfigurations.Node(initial.Name). + WithSpec(corev1applyconfigurations.NodeSpec().WithPodCIDR(initial.Spec.PodCIDR + "-updated")). + WithStatus(corev1applyconfigurations.NodeStatus().WithPhase(corev1.NodeRunning)) + + Expect(cl.Status().Apply(ctx, ac, client.FieldOwner("test-owner"))).To(Succeed()) + + actual := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: initial.Name}} + Expect(cl.Get(ctx, client.ObjectKeyFromObject(actual), actual)).To(Succeed()) + + initial.ResourceVersion = actual.ResourceVersion + initial.Status = actual.Status + Expect(initial).To(BeComparableTo(actual)) + }) + + It("should Unmarshal the schemaless object with int64 to preserve ints", func(ctx SpecContext) { + schemeBuilder := &scheme.Builder{GroupVersion: schema.GroupVersion{Group: "test", Version: "v1"}} + schemeBuilder.Register(&WithSchemalessSpec{}) + + scheme := runtime.NewScheme() + Expect(schemeBuilder.AddToScheme(scheme)).NotTo(HaveOccurred()) + + spec := Schemaless{ + "key": int64(1), + } + + obj := &WithSchemalessSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "a-foo", + }, + Spec: spec, + } + cl := NewClientBuilder().WithScheme(scheme).WithStatusSubresource(obj).WithObjects(obj).Build() + + Expect(cl.Update(ctx, obj)).To(Succeed()) + Expect(obj.Spec).To(BeEquivalentTo(spec)) + Expect(cl.Get(ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed()) + 
Expect(obj.Spec).To(BeEquivalentTo(spec)) + }) + + It("should Unmarshal the schemaless object with float64 to preserve ints", func(ctx SpecContext) { + schemeBuilder := &scheme.Builder{GroupVersion: schema.GroupVersion{Group: "test", Version: "v1"}} + schemeBuilder.Register(&WithSchemalessSpec{}) + + scheme := runtime.NewScheme() + Expect(schemeBuilder.AddToScheme(scheme)).NotTo(HaveOccurred()) + + spec := Schemaless{ + "key": 1.1, + } + + obj := &WithSchemalessSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "a-foo", + }, + Spec: spec, + } + cl := NewClientBuilder().WithScheme(scheme).WithStatusSubresource(obj).WithObjects(obj).Build() + + Expect(cl.Update(ctx, obj)).To(Succeed()) + Expect(obj.Spec).To(BeEquivalentTo(spec)) + Expect(cl.Get(ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed()) + Expect(obj.Spec).To(BeEquivalentTo(spec)) + }) + + It("should not change the status of unstructured objects that are configured to have a status subresource on update", func(ctx SpecContext) { + obj := &unstructured.Unstructured{} + obj.SetAPIVersion("foo/v1") + obj.SetKind("Foo") + obj.SetName("a-foo") + + err := unstructured.SetNestedField(obj.Object, map[string]any{"state": "old"}, "status") + Expect(err).NotTo(HaveOccurred()) + + cl := NewClientBuilder().WithStatusSubresource(obj).WithObjects(obj).Build() + + err = unstructured.SetNestedField(obj.Object, map[string]any{"state": "new"}, "status") + Expect(err).ToNot(HaveOccurred()) + + Expect(cl.Update(ctx, obj)).To(Succeed()) + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed()) + + Expect(obj.Object["status"]).To(BeEquivalentTo(map[string]any{"state": "old"})) + }) + + It("should not change non-status fields of unstructured objects that are configured to have a status subresource on status update", func(ctx SpecContext) { + obj := &unstructured.Unstructured{} + obj.SetAPIVersion("foo/v1") + obj.SetKind("Foo") + obj.SetName("a-foo") + + err := unstructured.SetNestedField(obj.Object, "original", 
"spec") + Expect(err).NotTo(HaveOccurred()) + + cl := NewClientBuilder().WithStatusSubresource(obj).WithObjects(obj).Build() + + err = unstructured.SetNestedField(obj.Object, "from-status-update", "spec") + Expect(err).NotTo(HaveOccurred()) + err = unstructured.SetNestedField(obj.Object, map[string]any{"state": "new"}, "status") + Expect(err).ToNot(HaveOccurred()) + + Expect(cl.Status().Update(ctx, obj)).To(Succeed()) + Expect(cl.Get(ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed()) + + Expect(obj.Object["status"]).To(BeEquivalentTo(map[string]any{"state": "new"})) + Expect(obj.Object["spec"]).To(BeEquivalentTo("original")) + }) + + It("should not change the status of known unstructured objects that have a status subresource on update", func(ctx SpecContext) { + obj := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyAlways, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + }, + } + cl := NewClientBuilder().WithStatusSubresource(obj).WithObjects(obj).Build() + + // update using unstructured + u := &unstructured.Unstructured{} + u.SetAPIVersion("v1") + u.SetKind("Pod") + u.SetName(obj.Name) + err := cl.Get(ctx, client.ObjectKeyFromObject(u), u) + Expect(err).NotTo(HaveOccurred()) + + err = unstructured.SetNestedField(u.Object, string(corev1.RestartPolicyNever), "spec", "restartPolicy") + Expect(err).NotTo(HaveOccurred()) + err = unstructured.SetNestedField(u.Object, string(corev1.PodRunning), "status", "phase") + Expect(err).NotTo(HaveOccurred()) + + Expect(cl.Update(ctx, u)).To(Succeed()) + + actual := &corev1.Pod{} + Expect(cl.Get(ctx, client.ObjectKeyFromObject(obj), actual)).To(Succeed()) + obj.ResourceVersion = actual.ResourceVersion + // only the spec mutation should persist + obj.Spec.RestartPolicy = corev1.RestartPolicyNever + Expect(cmp.Diff(obj, actual)).To(BeEmpty()) + }) + + It("should not change non-status field of known unstructured objects that have a 
status subresource on status update", func(ctx SpecContext) { + obj := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyAlways, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + }, + } + cl := NewClientBuilder().WithStatusSubresource(obj).WithObjects(obj).Build() + + // status update using unstructured + u := &unstructured.Unstructured{} + u.SetAPIVersion("v1") + u.SetKind("Pod") + u.SetName(obj.Name) + err := cl.Get(ctx, client.ObjectKeyFromObject(u), u) + Expect(err).NotTo(HaveOccurred()) + + err = unstructured.SetNestedField(u.Object, string(corev1.RestartPolicyNever), "spec", "restartPolicy") + Expect(err).NotTo(HaveOccurred()) + err = unstructured.SetNestedField(u.Object, string(corev1.PodRunning), "status", "phase") + Expect(err).NotTo(HaveOccurred()) + + Expect(cl.Status().Update(ctx, u)).To(Succeed()) + + actual := &corev1.Pod{} + Expect(cl.Get(ctx, client.ObjectKeyFromObject(obj), actual)).To(Succeed()) + obj.ResourceVersion = actual.ResourceVersion + // only the status mutation should persist + obj.Status.Phase = corev1.PodRunning + Expect(cmp.Diff(obj, actual)).To(BeEmpty()) + }) + + It("should not change the status of unstructured objects that are configured to have a status subresource on patch", func(ctx SpecContext) { + obj := &unstructured.Unstructured{} + obj.SetAPIVersion("foo/v1") + obj.SetKind("Foo") + obj.SetName("a-foo") + cl := NewClientBuilder().WithStatusSubresource(obj).Build() + + Expect(cl.Create(ctx, obj)).To(Succeed()) + original := obj.DeepCopy() + + err := unstructured.SetNestedField(obj.Object, map[string]interface{}{"count": int64(2)}, "status") + Expect(err).ToNot(HaveOccurred()) + Expect(cl.Patch(ctx, obj, client.MergeFrom(original))).To(Succeed()) + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed()) + + Expect(obj.Object["status"]).To(BeNil()) + + }) + + It("should not change non-status fields of unstructured objects 
that are configured to have a status subresource on status patch", func(ctx SpecContext) { + obj := &unstructured.Unstructured{} + obj.SetAPIVersion("foo/v1") + obj.SetKind("Foo") + obj.SetName("a-foo") + + err := unstructured.SetNestedField(obj.Object, "original", "spec") + Expect(err).NotTo(HaveOccurred()) + + cl := NewClientBuilder().WithStatusSubresource(obj).WithObjects(obj).Build() + original := obj.DeepCopy() + + err = unstructured.SetNestedField(obj.Object, "from-status-update", "spec") + Expect(err).NotTo(HaveOccurred()) + err = unstructured.SetNestedField(obj.Object, map[string]any{"state": "new"}, "status") + Expect(err).ToNot(HaveOccurred()) + + Expect(cl.Status().Patch(ctx, obj, client.MergeFrom(original))).To(Succeed()) + Expect(cl.Get(ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed()) + + Expect(obj.Object["status"]).To(BeEquivalentTo(map[string]any{"state": "new"})) + Expect(obj.Object["spec"]).To(BeEquivalentTo("original")) + }) + + It("should return not found on status update of resources that don't have a status subresource", func(ctx SpecContext) { + obj := &unstructured.Unstructured{} + obj.SetAPIVersion("foo/v1") + obj.SetKind("Foo") + obj.SetName("a-foo") + + cl := NewClientBuilder().WithObjects(obj).Build() + + err := cl.Status().Update(ctx, obj) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + evictionTypes := []client.Object{ + &policyv1beta1.Eviction{}, + &policyv1.Eviction{}, + } + for _, tp := range evictionTypes { + It("should delete a pod through the eviction subresource", func(ctx SpecContext) { + pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}} + + cl := NewClientBuilder().WithObjects(pod).Build() + + err := cl.SubResource("eviction").Create(ctx, pod, tp) + Expect(err).NotTo(HaveOccurred()) + + err = cl.Get(ctx, client.ObjectKeyFromObject(pod), pod) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should return not found when attempting to evict a pod that doesn't exist", func(ctx 
SpecContext) { + cl := NewClientBuilder().Build() + + pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}} + err := cl.SubResource("eviction").Create(ctx, pod, tp) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should return not found when attempting to evict something other than a pod", func(ctx SpecContext) { + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "foo"}} + cl := NewClientBuilder().WithObjects(ns).Build() + + err := cl.SubResource("eviction").Create(ctx, ns, tp) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should return an error when using the wrong subresource", func(ctx SpecContext) { + cl := NewClientBuilder().Build() + + err := cl.SubResource("eviction-subresource").Create(ctx, &corev1.Namespace{}, tp) + Expect(err).To(HaveOccurred()) + }) + } + + It("should error when creating an eviction with the wrong type", func(ctx SpecContext) { + cl := NewClientBuilder().Build() + err := cl.SubResource("eviction").Create(ctx, &corev1.Pod{}, &corev1.Namespace{}) + Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + It("should create a ServiceAccount token through the token subresource", func(ctx SpecContext) { + sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "foo"}} + cl := NewClientBuilder().WithObjects(sa).Build() + + tokenRequest := &authenticationv1.TokenRequest{} + err := cl.SubResource("token").Create(ctx, sa, tokenRequest) + Expect(err).NotTo(HaveOccurred()) + + Expect(tokenRequest.Status.Token).NotTo(Equal("")) + Expect(tokenRequest.Status.ExpirationTimestamp).NotTo(Equal(metav1.Time{})) + }) + + It("should return not found when creating a token for a ServiceAccount that doesn't exist", func(ctx SpecContext) { + sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "foo"}} + cl := NewClientBuilder().Build() + + err := cl.SubResource("token").Create(ctx, sa, &authenticationv1.TokenRequest{}) + Expect(err).To(HaveOccurred()) + 
Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should error when creating a token with the wrong subresource type", func(ctx SpecContext) { + cl := NewClientBuilder().Build() + err := cl.SubResource("token").Create(ctx, &corev1.ServiceAccount{}, &corev1.Namespace{}) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsBadRequest(err)).To(BeTrue()) + }) + + It("should error when creating a token with the wrong type", func(ctx SpecContext) { + cl := NewClientBuilder().Build() + err := cl.SubResource("token").Create(ctx, &corev1.Secret{}, &authenticationv1.TokenRequest{}) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }) + + It("should leave typemeta empty on typed get", func(ctx SpecContext) { + cl := NewClientBuilder().WithObjects(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "foo", + }}).Build() + + var pod corev1.Pod + Expect(cl.Get(ctx, client.ObjectKey{Namespace: "default", Name: "foo"}, &pod)).NotTo(HaveOccurred()) + + Expect(pod.TypeMeta).To(Equal(metav1.TypeMeta{})) + }) + + It("should leave typemeta empty on typed list", func(ctx SpecContext) { + cl := NewClientBuilder().WithObjects(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "foo", + }}).Build() + + var podList corev1.PodList + Expect(cl.List(ctx, &podList)).NotTo(HaveOccurred()) + Expect(podList.ListMeta).To(Equal(metav1.ListMeta{})) + Expect(podList.Items[0].TypeMeta).To(Equal(metav1.TypeMeta{})) + }) + + It("should allow concurrent patches to a configMap", func(ctx SpecContext) { + scheme := runtime.NewScheme() + Expect(corev1.AddToScheme(scheme)).To(Succeed()) + + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + ResourceVersion: "0", + }, + } + cl := NewClientBuilder().WithScheme(scheme).WithObjects(obj).Build() + + const tries = 50 + wg := sync.WaitGroup{} + wg.Add(tries) + + for i := range tries { + go func() { + defer wg.Done() + defer GinkgoRecover() + + newObj := 
obj.DeepCopy() + newObj.Data = map[string]string{"foo": strconv.Itoa(i)} + Expect(cl.Patch(ctx, newObj, client.MergeFrom(obj))).To(Succeed()) + }() + } + wg.Wait() + + // While the order is not deterministic, there must be $tries distinct updates + // that each increment the resource version by one + Expect(cl.Get(ctx, client.ObjectKey{Name: "foo"}, obj)).To(Succeed()) + Expect(obj.ResourceVersion).To(Equal(strconv.Itoa(tries))) + }) + + It("should not allow concurrent patches to a configMap if the patch contains a ResourceVersion", func(ctx SpecContext) { + scheme := runtime.NewScheme() + Expect(corev1.AddToScheme(scheme)).To(Succeed()) + + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + ResourceVersion: "0", + }, + } + cl := NewClientBuilder().WithScheme(scheme).WithObjects(obj).Build() + wg := sync.WaitGroup{} + wg.Add(5) + + for i := range 5 { + go func() { + defer wg.Done() + defer GinkgoRecover() + + newObj := obj.DeepCopy() + newObj.ResourceVersion = "1" // include an invalid RV to cause a conflict + newObj.Data = map[string]string{"foo": strconv.Itoa(i)} + Expect(apierrors.IsConflict(cl.Patch(ctx, newObj, client.MergeFrom(obj)))).To(BeTrue()) + }() + } + wg.Wait() + }) + + It("should allow concurrent updates to an object that allows unconditionalUpdate if the incoming request has no RV", func(ctx SpecContext) { + scheme := runtime.NewScheme() + Expect(corev1.AddToScheme(scheme)).To(Succeed()) + + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + ResourceVersion: "0", + }, + } + cl := NewClientBuilder().WithScheme(scheme).WithObjects(obj).Build() + + const tries = 50 + wg := sync.WaitGroup{} + wg.Add(tries) + + for i := range tries { + go func() { + defer wg.Done() + defer GinkgoRecover() + + newObj := obj.DeepCopy() + newObj.Data = map[string]string{"foo": strconv.Itoa(i)} + newObj.ResourceVersion = "" + Expect(cl.Update(ctx, newObj)).To(Succeed()) + }() + } + wg.Wait() + + // While the order is not 
deterministic, there must be $tries distinct updates + // that each increment the resource version by one + Expect(cl.Get(ctx, client.ObjectKey{Name: "foo"}, obj)).To(Succeed()) + Expect(obj.ResourceVersion).To(Equal(strconv.Itoa(tries))) + }) + + It("If a create races with an update for an object that allows createOnUpdate, the update should always succeed", func(ctx SpecContext) { + scheme := runtime.NewScheme() + Expect(corev1.AddToScheme(scheme)).To(Succeed()) + + cl := NewClientBuilder().WithScheme(scheme).Build() + + const tries = 50 + wg := sync.WaitGroup{} + wg.Add(tries * 2) + + for i := range tries { + obj := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: strconv.Itoa(i), + }, + } + go func() { + defer wg.Done() + defer GinkgoRecover() + + // this may or may not succeed depending on if we win the race. Either is acceptable, + // but if it fails, it must fail due to an AlreadyExists. + err := cl.Create(ctx, obj.DeepCopy()) + if err != nil { + Expect(apierrors.IsAlreadyExists(err)).To(BeTrue()) + } + }() + + go func() { + defer wg.Done() + defer GinkgoRecover() + + // This must always succeed, regardless of the outcome of the create. 
+ Expect(cl.Update(ctx, obj.DeepCopy())).To(Succeed()) + }() + } + + wg.Wait() + }) + + It("If a delete races with an update for an object that allows createOnUpdate, the update should always succeed", func(ctx SpecContext) { + scheme := runtime.NewScheme() + Expect(corev1.AddToScheme(scheme)).To(Succeed()) + + cl := NewClientBuilder().WithScheme(scheme).Build() + + const tries = 50 + wg := sync.WaitGroup{} + wg.Add(tries * 2) + + for i := range tries { + obj := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: strconv.Itoa(i), + }, + } + Expect(cl.Create(ctx, obj.DeepCopy())).To(Succeed()) + + go func() { + defer wg.Done() + defer GinkgoRecover() + + Expect(cl.Delete(ctx, obj.DeepCopy())).To(Succeed()) + }() + + go func() { + defer wg.Done() + defer GinkgoRecover() + + // This must always succeed, regardless of if the delete came before or + // after us. + Expect(cl.Update(ctx, obj.DeepCopy())).To(Succeed()) + }() + } + + wg.Wait() + }) + + It("If a DeleteAllOf races with a delete, the DeleteAllOf should always succeed", func(ctx SpecContext) { + scheme := runtime.NewScheme() + Expect(corev1.AddToScheme(scheme)).To(Succeed()) + + cl := NewClientBuilder().WithScheme(scheme).Build() + + const objects = 50 + wg := sync.WaitGroup{} + wg.Add(objects) + + for i := range objects { + obj := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: strconv.Itoa(i), + }, + } + Expect(cl.Create(ctx, obj.DeepCopy())).To(Succeed()) + } + + for i := range objects { + obj := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: strconv.Itoa(i), + }, + } + + go func() { + defer wg.Done() + defer GinkgoRecover() + + // This may or may not succeed depending on if the DeleteAllOf is faster, + // but if it fails, it should be a not found. 
+ err := cl.Delete(ctx, obj) + if err != nil { + Expect(apierrors.IsNotFound(err)).To(BeTrue()) + } + }() + } + Expect(cl.DeleteAllOf(ctx, &corev1.Service{})).To(Succeed()) + + wg.Wait() + }) + + It("If an update races with a scale update, only one of them succeeds", func(ctx SpecContext) { + scheme := runtime.NewScheme() + Expect(appsv1.AddToScheme(scheme)).To(Succeed()) + + cl := NewClientBuilder().WithScheme(scheme).Build() + + const tries = 5000 + for i := range tries { + dep := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: strconv.Itoa(i), + }, + } + Expect(cl.Create(ctx, dep)).To(Succeed()) + + wg := sync.WaitGroup{} + wg.Add(2) + var updateSucceeded bool + var scaleSucceeded bool + + go func() { + defer wg.Done() + defer GinkgoRecover() + + dep := dep.DeepCopy() + dep.Annotations = map[string]string{"foo": "bar"} + + // This may or may not fail. If it does fail, it must be a conflict. + err := cl.Update(ctx, dep) + if err != nil { + Expect(apierrors.IsConflict(err)).To(BeTrue()) + } else { + updateSucceeded = true + } + }() + + go func() { + defer wg.Done() + defer GinkgoRecover() + + // This may or may not fail. If it does fail, it must be a conflict. 
+ scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: 10}} + err := cl.SubResource("scale").Update(ctx, dep.DeepCopy(), client.WithSubResourceBody(scale)) + if err != nil { + Expect(apierrors.IsConflict(err)).To(BeTrue()) + } else { + scaleSucceeded = true + } + }() + + wg.Wait() + Expect(updateSucceeded).ToNot(Equal(scaleSucceeded)) + } + + }) + + It("disallows scale subresources on unsupported built-in types", func(ctx SpecContext) { + scheme := runtime.NewScheme() + Expect(corev1.AddToScheme(scheme)).To(Succeed()) + Expect(apiextensions.AddToScheme(scheme)).To(Succeed()) + + obj := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + } + cl := NewClientBuilder().WithScheme(scheme).WithObjects(obj).Build() + + scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: 2}} + expectedErr := "unimplemented scale subresource for resource *v1.Pod" + Expect(cl.SubResource(subResourceScale).Get(ctx, obj, scale).Error()).To(Equal(expectedErr)) + Expect(cl.SubResource(subResourceScale).Update(ctx, obj, client.WithSubResourceBody(scale)).Error()).To(Equal(expectedErr)) + }) + + It("disallows scale subresources on non-existing objects", func(ctx SpecContext) { + obj := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](2), + }, + } + cl := NewClientBuilder().Build() + + scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: 2}} + expectedErr := "deployments.apps \"foo\" not found" + Expect(cl.SubResource(subResourceScale).Get(ctx, obj, scale).Error()).To(Equal(expectedErr)) + Expect(cl.SubResource(subResourceScale).Update(ctx, obj, client.WithSubResourceBody(scale)).Error()).To(Equal(expectedErr)) + }) + + It("clears typemeta from structured objects on create", func(ctx SpecContext) { + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, 
+ } + cl := NewClientBuilder().Build() + Expect(cl.Create(ctx, obj)).To(Succeed()) + Expect(obj.TypeMeta).To(Equal(metav1.TypeMeta{})) + }) + + It("clears typemeta from structured objects on update", func(ctx SpecContext) { + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + } + cl := NewClientBuilder().WithObjects(obj).Build() + Expect(cl.Update(ctx, obj)).To(Succeed()) + Expect(obj.TypeMeta).To(Equal(metav1.TypeMeta{})) + }) + + It("clears typemeta from structured objects on patch", func(ctx SpecContext) { + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + } + cl := NewClientBuilder().WithObjects(obj).Build() + original := obj.DeepCopy() + obj.TypeMeta = metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + } + Expect(cl.Patch(ctx, obj, client.MergeFrom(original))).To(Succeed()) + Expect(obj.TypeMeta).To(Equal(metav1.TypeMeta{})) + }) + + It("clears typemeta from structured objects on get", func(ctx SpecContext) { + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + } + cl := NewClientBuilder().WithObjects(obj).Build() + target := &corev1.ConfigMap{} + Expect(cl.Get(ctx, client.ObjectKeyFromObject(obj), target)).To(Succeed()) + Expect(target.TypeMeta).To(Equal(metav1.TypeMeta{})) + }) + + It("clears typemeta from structured objects on list", func(ctx SpecContext) { + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + } + cl := NewClientBuilder().WithObjects(obj).Build() + target := &corev1.ConfigMapList{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + } + Expect(cl.List(ctx, target)).To(Succeed()) + Expect(target.TypeMeta).To(Equal(metav1.TypeMeta{})) + 
Expect(target.Items[0].TypeMeta).To(Equal(metav1.TypeMeta{})) + }) + + It("is threadsafe", func(ctx SpecContext) { + cl := NewClientBuilder().Build() + + u := func() *unstructured.Unstructured { + u := &unstructured.Unstructured{} + u.SetAPIVersion("custom/v1") + u.SetKind("Version") + u.SetName("foo") + return u + } + + uList := func() *unstructured.UnstructuredList { + u := &unstructured.UnstructuredList{} + u.SetAPIVersion("custom/v1") + u.SetKind("Version") + + return u + } + + meta := func() *metav1.PartialObjectMetadata { + return &metav1.PartialObjectMetadata{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "custom/v1", + Kind: "Version", + }, + } + } + metaList := func() *metav1.PartialObjectMetadataList { + return &metav1.PartialObjectMetadataList{ + TypeMeta: metav1.TypeMeta{ + + APIVersion: "custom/v1", + Kind: "Version", + }, + } + } + + pod := func() *corev1.Pod { + return &corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }} + } + + ops := []func(){ + func() { _ = cl.Create(ctx, u()) }, + func() { _ = cl.Get(ctx, client.ObjectKeyFromObject(u()), u()) }, + func() { _ = cl.Update(ctx, u()) }, + func() { _ = cl.Patch(ctx, u(), client.RawPatch(types.StrategicMergePatchType, []byte("foo"))) }, + func() { _ = cl.Delete(ctx, u()) }, + func() { _ = cl.DeleteAllOf(ctx, u(), client.HasLabels{"foo"}) }, + func() { _ = cl.List(ctx, uList()) }, + + func() { _ = cl.Create(ctx, meta()) }, + func() { _ = cl.Get(ctx, client.ObjectKeyFromObject(meta()), meta()) }, + func() { _ = cl.Update(ctx, meta()) }, + func() { _ = cl.Patch(ctx, meta(), client.RawPatch(types.StrategicMergePatchType, []byte("foo"))) }, + func() { _ = cl.Delete(ctx, meta()) }, + func() { _ = cl.DeleteAllOf(ctx, meta(), client.HasLabels{"foo"}) }, + func() { _ = cl.List(ctx, metaList()) }, + + func() { _ = cl.Create(ctx, pod()) }, + func() { _ = cl.Get(ctx, client.ObjectKeyFromObject(pod()), pod()) 
}, + func() { _ = cl.Update(ctx, pod()) }, + func() { _ = cl.Patch(ctx, pod(), client.RawPatch(types.StrategicMergePatchType, []byte("foo"))) }, + func() { _ = cl.Delete(ctx, pod()) }, + func() { _ = cl.DeleteAllOf(ctx, pod(), client.HasLabels{"foo"}) }, + func() { _ = cl.List(ctx, &corev1.PodList{}) }, + } + + wg := sync.WaitGroup{} + wg.Add(len(ops)) + for _, op := range ops { + go func() { + defer wg.Done() + op() + }() + } + + wg.Wait() + }) + + DescribeTable("mutating operations return the updated object", + func(ctx SpecContext, mutate func(ctx SpecContext) (*corev1.ConfigMap, error)) { + mutated, err := mutate(ctx) + Expect(err).NotTo(HaveOccurred()) + + var retrieved corev1.ConfigMap + Expect(cl.Get(ctx, client.ObjectKeyFromObject(mutated), &retrieved)).To(Succeed()) + + Expect(&retrieved).To(BeComparableTo(mutated)) + }, + + Entry("create", func(ctx SpecContext) (*corev1.ConfigMap, error) { + cl = NewClientBuilder().Build() + cm.ResourceVersion = "" + return cm, cl.Create(ctx, cm) + }), + Entry("update", func(ctx SpecContext) (*corev1.ConfigMap, error) { + cl = NewClientBuilder().WithObjects(cm).Build() + cm.Labels = map[string]string{"updated-label": "update-test"} + cm.Data["new-key"] = "new-value" + return cm, cl.Update(ctx, cm) + }), + Entry("patch", func(ctx SpecContext) (*corev1.ConfigMap, error) { + cl = NewClientBuilder().WithObjects(cm).Build() + original := cm.DeepCopy() + + cm.Labels = map[string]string{"updated-label": "update-test"} + cm.Data["new-key"] = "new-value" + return cm, cl.Patch(ctx, cm, client.MergeFrom(original)) + }), + Entry("Create through Apply", func(ctx SpecContext) (*corev1.ConfigMap, error) { + ac := corev1applyconfigurations.ConfigMap(cm.Name, cm.Namespace).WithData(cm.Data) + + cl = NewClientBuilder().Build() + Expect(cl.Apply(ctx, ac, client.FieldOwner("foo"))).To(Succeed()) + + serialized, err := json.Marshal(ac) + Expect(err).NotTo(HaveOccurred()) + + var cm corev1.ConfigMap + Expect(json.Unmarshal(serialized, 
&cm)).To(Succeed()) + + // ApplyConfigurations always have TypeMeta set as they do not support using the scheme + // to retrieve gvk. + cm.TypeMeta = metav1.TypeMeta{} + return &cm, nil + }), + Entry("Update through Apply", func(ctx SpecContext) (*corev1.ConfigMap, error) { + ac := corev1applyconfigurations.ConfigMap(cm.Name, cm.Namespace). + WithLabels(map[string]string{"updated-label": "update-test"}). + WithData(map[string]string{"new-key": "new-value"}) + + cl = NewClientBuilder().WithObjects(cm).Build() + Expect(cl.Apply(ctx, ac, client.FieldOwner("foo"))).To(Succeed()) + + serialized, err := json.Marshal(ac) + Expect(err).NotTo(HaveOccurred()) + + var cm corev1.ConfigMap + Expect(json.Unmarshal(serialized, &cm)).To(Succeed()) + + // ApplyConfigurations always have TypeMeta set as they do not support using the scheme + // to retrieve gvk. + cm.TypeMeta = metav1.TypeMeta{} + return &cm, nil + }), + ) + + It("supports server-side apply of a client-go resource", func(ctx SpecContext) { + cl := NewClientBuilder().Build() + obj := &unstructured.Unstructured{} + obj.SetAPIVersion("v1") + obj.SetKind("ConfigMap") + obj.SetName("foo") + Expect(unstructured.SetNestedField(obj.Object, map[string]any{"some": "data"}, "data")).To(Succeed()) + + Expect(cl.Patch(ctx, obj, client.Apply, client.FieldOwner("foo"))).To(Succeed()) //nolint:staticcheck // will be removed once client.Apply is removed + + cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "foo"}} + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(cm), cm)).To(Succeed()) + Expect(cm.Data).To(Equal(map[string]string{"some": "data"})) + + Expect(unstructured.SetNestedField(obj.Object, map[string]any{"other": "data"}, "data")).To(Succeed()) + Expect(cl.Patch(ctx, obj, client.Apply, client.FieldOwner("foo"))).To(Succeed()) //nolint:staticcheck // will be removed once client.Apply is removed + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(cm), cm)).To(Succeed()) + 
Expect(cm.Data).To(Equal(map[string]string{"other": "data"})) + }) + + It("supports server-side apply of a custom resource", func(ctx SpecContext) { + cl := NewClientBuilder().Build() + obj := &unstructured.Unstructured{} + obj.SetAPIVersion("custom/v1") + obj.SetKind("FakeResource") + obj.SetName("foo") + result := obj.DeepCopy() + + Expect(unstructured.SetNestedField(obj.Object, map[string]any{"some": "data"}, "spec")).To(Succeed()) + + Expect(cl.Patch(ctx, obj, client.Apply, client.FieldOwner("foo"))).To(Succeed()) //nolint:staticcheck // will be removed once client.Apply is removed + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(result), result)).To(Succeed()) + Expect(result.Object["spec"]).To(Equal(map[string]any{"some": "data"})) + + Expect(unstructured.SetNestedField(obj.Object, map[string]any{"other": "data"}, "spec")).To(Succeed()) + Expect(cl.Patch(ctx, obj, client.Apply, client.FieldOwner("foo"))).To(Succeed()) //nolint:staticcheck // will be removed once client.Apply is removed + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(result), result)).To(Succeed()) + Expect(result.Object["spec"]).To(Equal(map[string]any{"other": "data"})) + }) + + It("errors out when doing SSA with managedFields set", func(ctx SpecContext) { + cl := NewClientBuilder().WithReturnManagedFields().Build() + obj := &unstructured.Unstructured{} + obj.SetAPIVersion("v1") + obj.SetKind("ConfigMap") + obj.SetName("foo") + Expect(unstructured.SetNestedField(obj.Object, map[string]any{"some": "data"}, "data")).To(Succeed()) + + Expect(cl.Patch(ctx, obj, client.Apply, client.FieldOwner("foo"))).To(Succeed()) //nolint:staticcheck // will be removed once client.Apply is removed + + err := cl.Patch(ctx, obj, client.Apply, client.FieldOwner("foo")) //nolint:staticcheck // will be removed once client.Apply is removed + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("metadata.managedFields must be nil")) + }) + + It("supports server-side apply using a custom type 
converter", func(ctx SpecContext) { + cl := NewClientBuilder(). + WithTypeConverters(clientgoapplyconfigurations.NewTypeConverter(clientgoscheme.Scheme)). + Build() + obj := &unstructured.Unstructured{} + obj.SetAPIVersion("v1") + obj.SetKind("ConfigMap") + obj.SetName("foo") + + Expect(unstructured.SetNestedField(obj.Object, map[string]any{"some": "data"}, "data")).To(Succeed()) + + Expect(cl.Patch(ctx, obj, client.Apply, client.FieldOwner("foo"))).To(Succeed()) //nolint:staticcheck // will be removed once client.Apply is removed + + cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "foo"}} + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(cm), cm)).To(Succeed()) + Expect(cm.Data).To(Equal(map[string]string{"some": "data"})) + + Expect(unstructured.SetNestedField(obj.Object, map[string]any{"other": "data"}, "data")).To(Succeed()) + Expect(cl.Patch(ctx, obj, client.Apply, client.FieldOwner("foo"))).To(Succeed()) //nolint:staticcheck // will be removed once client.Apply is removed + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(cm), cm)).To(Succeed()) + Expect(cm.Data).To(Equal(map[string]string{"other": "data"})) + }) + + It("returns managedFields if configured to do so", func(ctx SpecContext) { + cl := NewClientBuilder().WithReturnManagedFields().Build() + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-cm", + Namespace: "default", + }, + Data: map[string]string{ + "initial": "data", + }, + } + Expect(cl.Create(ctx, cm)).NotTo(HaveOccurred()) + Expect(cm.ManagedFields).NotTo(BeNil()) + + retrieved := &corev1.ConfigMap{} + Expect(cl.Get(ctx, client.ObjectKeyFromObject(cm), retrieved)).NotTo(HaveOccurred()) + Expect(retrieved.ManagedFields).NotTo(BeNil()) + + cm.Data["another"] = "value" + cm.SetManagedFields(nil) + Expect(cl.Update(ctx, cm)).NotTo(HaveOccurred()) + Expect(cm.ManagedFields).NotTo(BeNil()) + + cm.SetManagedFields(nil) + beforePatch := cm.DeepCopy() + cm.Data["a-third"] = "value" + Expect(cl.Patch(ctx, cm, 
client.MergeFrom(beforePatch))).NotTo(HaveOccurred()) + Expect(cm.ManagedFields).NotTo(BeNil()) + + u := &unstructured.Unstructured{Object: map[string]any{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]any{ + "name": cm.Name, + "namespace": cm.Namespace, + }, + "data": map[string]any{ + "ssa": "value", + }, + }} + Expect(cl.Patch(ctx, u, client.Apply, client.FieldOwner("foo"))).NotTo(HaveOccurred()) //nolint:staticcheck // will be removed once client.Apply is removed + _, exists, err := unstructured.NestedFieldNoCopy(u.Object, "metadata", "managedFields") + Expect(err).NotTo(HaveOccurred()) + Expect(exists).To(BeTrue()) + + var list corev1.ConfigMapList + Expect(cl.List(ctx, &list)).NotTo(HaveOccurred()) + for _, item := range list.Items { + Expect(item.ManagedFields).NotTo(BeNil()) + } + }) + + It("clears managedFields from objects in a list", func(ctx SpecContext) { + cl := NewClientBuilder().Build() + obj := corev1applyconfigurations. + ConfigMap("foo", "default"). + WithData(map[string]string{"some": "data"}) + + Expect(cl.Apply(ctx, obj, &client.ApplyOptions{FieldManager: "test-manager"})).To(Succeed()) + + var list corev1.ConfigMapList + Expect(cl.List(ctx, &list)).NotTo(HaveOccurred()) + for _, item := range list.Items { + Expect(item.ManagedFields).To(BeNil()) + } + }) + + It("supports server-side apply of a client-go resource via Apply method", func(ctx SpecContext) { + cl := NewClientBuilder().Build() + obj := corev1applyconfigurations. + ConfigMap("foo", "default"). 
+ WithData(map[string]string{"some": "data"}) + + Expect(cl.Apply(ctx, obj, &client.ApplyOptions{FieldManager: "test-manager"})).To(Succeed()) + + cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(cm), cm)).To(Succeed()) + Expect(cm.Data).To(BeComparableTo(map[string]string{"some": "data"})) + + obj.Data = map[string]string{"other": "data"} + Expect(cl.Apply(ctx, obj, &client.ApplyOptions{FieldManager: "test-manager"})).To(Succeed()) + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(cm), cm)).To(Succeed()) + Expect(cm.Data).To(BeComparableTo(map[string]string{"other": "data"})) + }) + + It("returns a conflict when trying to Create an object with UID set through Apply", func(ctx SpecContext) { + cl := NewClientBuilder().Build() + obj := corev1applyconfigurations. + ConfigMap("foo", "default"). + WithUID("123") + + err := cl.Apply(ctx, obj, &client.ApplyOptions{FieldManager: "test-manager"}) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsConflict(err)).To(BeTrue()) + }) + + It("errors when trying to server-side apply an object without configuring a FieldManager", func(ctx SpecContext) { + cl := NewClientBuilder().Build() + obj := corev1applyconfigurations. + ConfigMap("foo", "default"). + WithData(map[string]string{"some": "data"}) + + err := cl.Apply(ctx, obj) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsInvalid(err)).To(BeTrue(), "Expected error to be an invalid error") + }) + + It("errors when trying to server-side apply an object with an invalid FieldManager", func(ctx SpecContext) { + cl := NewClientBuilder().Build() + obj := corev1applyconfigurations. + ConfigMap("foo", "default"). 
+ WithData(map[string]string{"some": "data"}) + + err := cl.Apply(ctx, obj, client.FieldOwner("\x00")) + Expect(err).To(HaveOccurred()) + Expect(apierrors.IsInvalid(err)).To(BeTrue(), "Expected error to be an invalid error") + }) + + It("supports server-side apply of a custom resource via Apply method", func(ctx SpecContext) { + cl := NewClientBuilder().Build() + obj := &unstructured.Unstructured{} + obj.SetAPIVersion("custom/v1") + obj.SetKind("FakeResource") + obj.SetName("foo") + result := obj.DeepCopy() + + Expect(unstructured.SetNestedField(obj.Object, map[string]any{"some": "data"}, "spec")).To(Succeed()) + + applyConfig := client.ApplyConfigurationFromUnstructured(obj) + Expect(cl.Apply(ctx, applyConfig, &client.ApplyOptions{FieldManager: "test-manager"})).To(Succeed()) + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(result), result)).To(Succeed()) + Expect(result.Object["spec"]).To(Equal(map[string]any{"some": "data"})) + + Expect(unstructured.SetNestedField(obj.Object, map[string]any{"other": "data"}, "spec")).To(Succeed()) + applyConfig2 := client.ApplyConfigurationFromUnstructured(obj) + Expect(cl.Apply(ctx, applyConfig2, &client.ApplyOptions{FieldManager: "test-manager"})).To(Succeed()) + + Expect(cl.Get(ctx, client.ObjectKeyFromObject(result), result)).To(Succeed()) + Expect(result.Object["spec"]).To(Equal(map[string]any{"other": "data"})) + }) + + It("sets the fieldManager in create, patch and update", func(ctx SpecContext) { + owner := "test-owner" + cl := client.WithFieldOwner( + NewClientBuilder().WithReturnManagedFields().Build(), + owner, + ) + + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "foo"}, + Data: map[string]string{"method": "create"}, + } + Expect(cl.Create(ctx, obj)).NotTo(HaveOccurred()) + + Expect(obj.ManagedFields).NotTo(BeEmpty()) + for _, f := range obj.ManagedFields { + Expect(f.Manager).To(BeEquivalentTo(owner)) + } + + originalObj := obj.DeepCopy() + obj.Data["method"] = "patch" + 
Expect(cl.Patch(ctx, obj, client.MergeFrom(originalObj))).NotTo(HaveOccurred()) + Expect(obj.ManagedFields).NotTo(BeEmpty()) + for _, f := range obj.ManagedFields { + Expect(f.Manager).To(BeEquivalentTo(owner)) + } + + obj.Data["method"] = "update" + Expect(cl.Update(ctx, obj)).NotTo(HaveOccurred()) + Expect(obj.ManagedFields).NotTo(BeEmpty()) + for _, f := range obj.ManagedFields { + Expect(f.Manager).To(BeEquivalentTo(owner)) + } + }) + + It("sets the fieldManager when creating through update", func(ctx SpecContext) { + owner := "test-owner" + cl := client.WithFieldOwner( + NewClientBuilder().WithReturnManagedFields().Build(), + owner, + ) + + obj := &corev1.Event{ObjectMeta: metav1.ObjectMeta{Name: "foo"}} + Expect(cl.Update(ctx, obj, client.FieldOwner(owner))).NotTo(HaveOccurred()) + for _, f := range obj.ManagedFields { + Expect(f.Manager).To(BeEquivalentTo(owner)) + } + }) + + // GH-3267 + It("Doesn't leave stale data when updating an object through SSA", func(ctx SpecContext) { + obj := corev1applyconfigurations. + ConfigMap("foo", "default"). + WithData(map[string]string{"some": "data"}) + + cl := NewClientBuilder().Build() + Expect(cl.Apply(ctx, obj, client.FieldOwner("foo"))).NotTo(HaveOccurred()) + + obj.WithData(map[string]string{"bar": "baz"}) + Expect(cl.Apply(ctx, obj, client.FieldOwner("foo"))).NotTo(HaveOccurred()) + var cms corev1.ConfigMapList + Expect(cl.List(ctx, &cms)).NotTo(HaveOccurred()) + Expect(len(cms.Items)).To(BeEquivalentTo(1)) + }) + + It("sets resourceVersion on SSA create", func(ctx SpecContext) { + obj := corev1applyconfigurations. + ConfigMap("foo", "default"). + WithData(map[string]string{"some": "data"}) + + cl := NewClientBuilder().Build() + Expect(cl.Apply(ctx, obj, client.FieldOwner("foo"))).NotTo(HaveOccurred()) + // Ideally we should only test for it to not be empty, realistically we will + // break ppl if we ever start setting a different value. 
+ Expect(obj.ResourceVersion).To(BeEquivalentTo(ptr.To("1"))) + }) + + It("ignores a passed resourceVersion on SSA create", func(ctx SpecContext) { + obj := corev1applyconfigurations. + ConfigMap("foo", "default"). + WithData(map[string]string{"some": "data"}). + WithResourceVersion("1234") + + cl := NewClientBuilder().Build() + Expect(cl.Apply(ctx, obj, client.FieldOwner("foo"))).NotTo(HaveOccurred()) + Expect(obj.ResourceVersion).To(BeEquivalentTo(ptr.To("1"))) + }) + + It("allows to set deletionTimestamp on an object during SSA create", func(ctx SpecContext) { + now := metav1.Time{Time: time.Now().Round(time.Second)} + obj := corev1applyconfigurations. + ConfigMap("foo", "default"). + WithDeletionTimestamp(now). + WithData(map[string]string{"some": "data"}) + + cl := NewClientBuilder().Build() + Expect(cl.Apply(ctx, obj, client.FieldOwner("foo"))).NotTo(HaveOccurred()) + + Expect(obj.DeletionTimestamp).To(BeEquivalentTo(&now)) + }) + + It("will silently ignore a deletionTimestamp update through SSA", func(ctx SpecContext) { + now := metav1.Time{Time: time.Now().Round(time.Second)} + obj := corev1applyconfigurations. + ConfigMap("foo", "default"). + WithDeletionTimestamp(now). + WithFinalizers("foo.bar"). 
+ WithData(map[string]string{"some": "data"}) + + cl := NewClientBuilder().Build() + Expect(cl.Apply(ctx, obj, client.FieldOwner("foo"))).NotTo(HaveOccurred()) + Expect(obj.DeletionTimestamp).To(BeEquivalentTo(&now)) + + later := metav1.Time{Time: now.Add(time.Second)} + obj.DeletionTimestamp = &later + Expect(cl.Apply(ctx, obj, client.FieldOwner("foo"))).NotTo(HaveOccurred()) + Expect(*obj.DeletionTimestamp).To(BeEquivalentTo(now)) + }) + + It("will error out if an object with invalid managedFields is added", func(ctx SpecContext) { + fieldV1Map := map[string]interface{}{ + "f:metadata": map[string]interface{}{ + "f:name": map[string]interface{}{}, + "f:labels": map[string]interface{}{}, + "f:annotations": map[string]interface{}{}, + "f:finalizers": map[string]interface{}{}, + }, + } + fieldV1, err := json.Marshal(fieldV1Map) + Expect(err).NotTo(HaveOccurred()) + + obj := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ + Name: "cm-1", + Namespace: "default", + ManagedFields: []metav1.ManagedFieldsEntry{{ + Manager: "my-manager", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: fieldV1}, + }}, + }} + + Expect(func() { + NewClientBuilder().WithObjects(obj).Build() + }).To(PanicWith(MatchError(ContainSubstring("invalid managedFields")))) + }) + + It("allows adding an object with managedFields", func(ctx SpecContext) { + fieldV1Map := map[string]interface{}{ + "f:metadata": map[string]interface{}{ + "f:name": map[string]interface{}{}, + "f:labels": map[string]interface{}{}, + "f:annotations": map[string]interface{}{}, + "f:finalizers": map[string]interface{}{}, + }, + } + fieldV1, err := json.Marshal(fieldV1Map) + Expect(err).NotTo(HaveOccurred()) + + obj := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ + Name: "cm-1", + Namespace: "default", + ManagedFields: []metav1.ManagedFieldsEntry{{ + Manager: "my-manager", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: 
&metav1.FieldsV1{Raw: fieldV1}, + APIVersion: "v1", + }}, + }} + + NewClientBuilder().WithObjects(obj).Build() + }) + + It("allows adding an object with invalid managedFields when not using the FieldManagedObjectTracker", func(ctx SpecContext) { + fieldV1Map := map[string]interface{}{ + "f:metadata": map[string]interface{}{ + "f:name": map[string]interface{}{}, + "f:labels": map[string]interface{}{}, + "f:annotations": map[string]interface{}{}, + "f:finalizers": map[string]interface{}{}, + }, + } + fieldV1, err := json.Marshal(fieldV1Map) + Expect(err).NotTo(HaveOccurred()) + + obj := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ + Name: "cm-1", + Namespace: "default", + ManagedFields: []metav1.ManagedFieldsEntry{{ + Manager: "my-manager", + Operation: metav1.ManagedFieldsOperationUpdate, + FieldsType: "FieldsV1", + FieldsV1: &metav1.FieldsV1{Raw: fieldV1}, + }}, + }} + + NewClientBuilder(). + WithObjectTracker(testing.NewObjectTracker( + clientgoscheme.Scheme, + serializer.NewCodecFactory(clientgoscheme.Scheme).UniversalDecoder(), + )). + WithObjects(obj). 
+ Build() + }) + + scalableObjs := []client.Object{ + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](2), + }, + }, + &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: appsv1.ReplicaSetSpec{ + Replicas: ptr.To[int32](2), + }, + }, + &corev1.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: corev1.ReplicationControllerSpec{ + Replicas: ptr.To[int32](2), + }, + }, + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: ptr.To[int32](2), + }, + }, + } + for _, obj := range scalableObjs { + It(fmt.Sprintf("should be able to Get scale subresources for resource %T", obj), func(ctx SpecContext) { + cl := NewClientBuilder().WithObjects(obj).Build() + + scaleActual := &autoscalingv1.Scale{} + Expect(cl.SubResource(subResourceScale).Get(ctx, obj, scaleActual)).NotTo(HaveOccurred()) + + scaleExpected := &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Name: obj.GetName(), + UID: obj.GetUID(), + ResourceVersion: obj.GetResourceVersion(), + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: 2, + }, + } + Expect(cmp.Diff(scaleExpected, scaleActual)).To(BeEmpty()) + }) + + It(fmt.Sprintf("should be able to Update scale subresources for resource %T", obj), func(ctx SpecContext) { + cl := NewClientBuilder().WithObjects(obj).Build() + + scaleExpected := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: 3}} + Expect(cl.SubResource(subResourceScale).Update(ctx, obj, client.WithSubResourceBody(scaleExpected))).NotTo(HaveOccurred()) + + objActual := obj.DeepCopyObject().(client.Object) + Expect(cl.Get(ctx, client.ObjectKeyFromObject(objActual), objActual)).To(Succeed()) + + objExpected := obj.DeepCopyObject().(client.Object) + switch expected := objExpected.(type) { + case *appsv1.Deployment: + expected.ResourceVersion = objActual.GetResourceVersion() + 
expected.Spec.Replicas = ptr.To(int32(3)) + case *appsv1.ReplicaSet: + expected.ResourceVersion = objActual.GetResourceVersion() + expected.Spec.Replicas = ptr.To(int32(3)) + case *corev1.ReplicationController: + expected.ResourceVersion = objActual.GetResourceVersion() + expected.Spec.Replicas = ptr.To(int32(3)) + case *appsv1.StatefulSet: + expected.ResourceVersion = objActual.GetResourceVersion() + expected.Spec.Replicas = ptr.To(int32(3)) + } + Expect(cmp.Diff(objExpected, objActual)).To(BeEmpty()) + + scaleActual := &autoscalingv1.Scale{} + Expect(cl.SubResource(subResourceScale).Get(ctx, obj, scaleActual)).NotTo(HaveOccurred()) + + // When we called Update, these were derived but we need them now to compare. + scaleExpected.Name = scaleActual.Name + scaleExpected.ResourceVersion = scaleActual.ResourceVersion + Expect(cmp.Diff(scaleExpected, scaleActual)).To(BeEmpty()) + }) + + } +}) + +type Schemaless map[string]interface{} + +type WithSchemalessSpec struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec Schemaless `json:"spec,omitempty"` +} + +func (t *WithSchemalessSpec) DeepCopy() *WithSchemalessSpec { + w := &WithSchemalessSpec{ + ObjectMeta: *t.ObjectMeta.DeepCopy(), + } + w.TypeMeta = metav1.TypeMeta{ + APIVersion: t.APIVersion, + Kind: t.Kind, + } + t.Spec.DeepCopyInto(&w.Spec) + + return w +} + +func (t *WithSchemalessSpec) DeepCopyObject() runtime.Object { + return t.DeepCopy() +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Schemaless) DeepCopyInto(out *Schemaless) { + if *in != nil { + *out = make(Schemaless, len(*in)) + for key := range *in { + (*out)[key] = (*in)[key] + } + } +} + +// DeepCopy copies the receiver, creating a new Schemaless. 
+func (in *Schemaless) DeepCopy() *Schemaless { + if in == nil { + return nil + } + out := new(Schemaless) + in.DeepCopyInto(out) + return out +} + +var _ = Describe("Fake client builder", func() { + It("panics when an index with the same name and GroupVersionKind is registered twice", func(ctx SpecContext) { + // We need any realistic GroupVersionKind, the choice of apps/v1 Deployment is arbitrary. + cb := NewClientBuilder().WithIndex(&appsv1.Deployment{}, + "test-name", + func(client.Object) []string { return nil }) + + Expect(func() { + cb.WithIndex(&appsv1.Deployment{}, + "test-name", + func(client.Object) []string { return []string{"foo"} }) + }).To(Panic()) + }) + + It("should wrap the fake client with an interceptor when WithInterceptorFuncs is called", func(ctx SpecContext) { + var called bool + cli := NewClientBuilder().WithInterceptorFuncs(interceptor.Funcs{ + Get: func(ctx context.Context, client client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + called = true + return nil + }, + }).Build() + err := cli.Get(ctx, client.ObjectKey{}, &corev1.Pod{}) + Expect(err).NotTo(HaveOccurred()) + Expect(called).To(BeTrue()) + }) + + It("should panic when calling build more than once", func() { + cb := NewClientBuilder() + anotherCb := cb + cb.Build() + Expect(func() { + anotherCb.Build() + }).To(Panic()) }) }) diff --git a/pkg/client/fake/doc.go b/pkg/client/fake/doc.go index 3b9099fa0a..47cad3980d 100644 --- a/pkg/client/fake/doc.go +++ b/pkg/client/fake/doc.go @@ -17,14 +17,22 @@ limitations under the License. /* Package fake provides a fake client for testing. -An fake client is backed by its simple object store indexed by GroupVersionResource. +A fake client is backed by its simple object store indexed by GroupVersionResource. You can create a fake client with optional objects. - client := NewFakeClient(initObjs...) 
// initObjs is a slice of runtime.Object + client := NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build() You can invoke the methods defined in the Client interface. -When it doubt, it's almost always better not to use this package and instead use +When in doubt, it's almost always better not to use this package and instead use envtest.Environment with a real client and API server. + +WARNING: ⚠️ Current Limitations / Known Issues with the fake Client ⚠️ + - This client does not have a way to inject specific errors to test handled vs. unhandled errors. + - There is some support for sub resources which can cause issues with tests if you're trying to update + e.g. metadata and status in the same reconcile. + - No OpenAPI validation is performed when creating or updating objects. + - ObjectMeta's `Generation` and `ResourceVersion` don't behave properly, Patch or Update + operations that rely on these fields will fail, or give false positives. */ package fake diff --git a/pkg/client/fake/typeconverter.go b/pkg/client/fake/typeconverter.go new file mode 100644 index 0000000000..3cb3a0dc77 --- /dev/null +++ b/pkg/client/fake/typeconverter.go @@ -0,0 +1,60 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/managedfields" + "sigs.k8s.io/structured-merge-diff/v6/typed" +) + +type multiTypeConverter struct { + upstream []managedfields.TypeConverter +} + +func (m multiTypeConverter) ObjectToTyped(r runtime.Object, o ...typed.ValidationOptions) (*typed.TypedValue, error) { + var errs []error + for _, u := range m.upstream { + res, err := u.ObjectToTyped(r, o...) + if err != nil { + errs = append(errs, err) + continue + } + + return res, nil + } + + return nil, fmt.Errorf("failed to convert Object to TypedValue: %w", kerrors.NewAggregate(errs)) +} + +func (m multiTypeConverter) TypedToObject(v *typed.TypedValue) (runtime.Object, error) { + var errs []error + for _, u := range m.upstream { + res, err := u.TypedToObject(v) + if err != nil { + errs = append(errs, err) + continue + } + + return res, nil + } + + return nil, fmt.Errorf("failed to convert TypedValue to Object: %w", kerrors.NewAggregate(errs)) +} diff --git a/pkg/client/fake/typeconverter_test.go b/pkg/client/fake/typeconverter_test.go new file mode 100644 index 0000000000..8acba79f88 --- /dev/null +++ b/pkg/client/fake/typeconverter_test.go @@ -0,0 +1,200 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "errors" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/managedfields" + "sigs.k8s.io/structured-merge-diff/v6/typed" +) + +var _ = Describe("multiTypeConverter", func() { + Describe("ObjectToTyped", func() { + It("should use first converter when it succeeds", func() { + testObj := &corev1.ConfigMap{Data: map[string]string{"key": "value"}} + testTyped := &typed.TypedValue{} + + firstConverter := &mockTypeConverter{ + objectToTypedResult: testTyped, + } + secondConverter := &mockTypeConverter{ + objectToTypedError: errors.New("second converter should not be called"), + } + + converter := multiTypeConverter{ + upstream: []managedfields.TypeConverter{firstConverter, secondConverter}, + } + + result, err := converter.ObjectToTyped(testObj) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(testTyped)) + }) + + It("should use second converter when first fails", func() { + testObj := &corev1.ConfigMap{Data: map[string]string{"key": "value"}} + testTyped := &typed.TypedValue{} + + firstConverter := &mockTypeConverter{ + objectToTypedError: errors.New("first converter error"), + } + secondConverter := &mockTypeConverter{ + objectToTypedResult: testTyped, + } + + converter := multiTypeConverter{ + upstream: []managedfields.TypeConverter{firstConverter, secondConverter}, + } + + result, err := converter.ObjectToTyped(testObj) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(testTyped)) + }) + + It("should return aggregate error when all converters fail", func() { + testObj := &corev1.ConfigMap{Data: map[string]string{"key": "value"}} + + firstConverter := &mockTypeConverter{ + objectToTypedError: errors.New("first converter error"), + } + secondConverter := &mockTypeConverter{ + objectToTypedError: errors.New("second converter error"), + } + + converter := multiTypeConverter{ + upstream: []managedfields.TypeConverter{firstConverter, secondConverter}, + } + + result, err 
:= converter.ObjectToTyped(testObj) + Expect(err).To(HaveOccurred()) + Expect(result).To(BeNil()) + Expect(err.Error()).To(ContainSubstring("failed to convert Object to Typed")) + Expect(err.Error()).To(ContainSubstring("first converter error")) + Expect(err.Error()).To(ContainSubstring("second converter error")) + }) + + It("should return error when no converters provided", func() { + testObj := &corev1.ConfigMap{Data: map[string]string{"key": "value"}} + + converter := multiTypeConverter{ + upstream: []managedfields.TypeConverter{}, + } + + result, err := converter.ObjectToTyped(testObj) + Expect(err).To(HaveOccurred()) + Expect(result).To(BeNil()) + Expect(err.Error()).To(ContainSubstring("failed to convert Object to Typed")) + }) + }) + + Describe("TypedToObject", func() { + It("should use first converter when it succeeds", func() { + testTyped := &typed.TypedValue{} + testObj := &corev1.ConfigMap{Data: map[string]string{"key": "value"}} + + firstConverter := &mockTypeConverter{ + typedToObjectResult: testObj, + } + secondConverter := &mockTypeConverter{ + typedToObjectError: errors.New("second converter should not be called"), + } + + converter := multiTypeConverter{ + upstream: []managedfields.TypeConverter{firstConverter, secondConverter}, + } + + result, err := converter.TypedToObject(testTyped) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(testObj)) + }) + + It("should use second converter when first fails", func() { + testTyped := &typed.TypedValue{} + testObj := &corev1.ConfigMap{Data: map[string]string{"key": "value"}} + + firstConverter := &mockTypeConverter{ + typedToObjectError: errors.New("first converter error"), + } + secondConverter := &mockTypeConverter{ + typedToObjectResult: testObj, + } + + converter := multiTypeConverter{ + upstream: []managedfields.TypeConverter{firstConverter, secondConverter}, + } + + result, err := converter.TypedToObject(testTyped) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(testObj)) + 
}) + + It("should return aggregate error when all converters fail", func() { + testTyped := &typed.TypedValue{} + + firstConverter := &mockTypeConverter{ + typedToObjectError: errors.New("first converter error"), + } + secondConverter := &mockTypeConverter{ + typedToObjectError: errors.New("second converter error"), + } + + converter := multiTypeConverter{ + upstream: []managedfields.TypeConverter{firstConverter, secondConverter}, + } + + result, err := converter.TypedToObject(testTyped) + Expect(err).To(HaveOccurred()) + Expect(result).To(BeNil()) + Expect(err.Error()).To(ContainSubstring("failed to convert TypedValue to Object")) + Expect(err.Error()).To(ContainSubstring("first converter error")) + Expect(err.Error()).To(ContainSubstring("second converter error")) + }) + + It("should return error when no converters provided", func() { + testTyped := &typed.TypedValue{} + + converter := multiTypeConverter{ + upstream: []managedfields.TypeConverter{}, + } + + result, err := converter.TypedToObject(testTyped) + Expect(err).To(HaveOccurred()) + Expect(result).To(BeNil()) + Expect(err.Error()).To(ContainSubstring("failed to convert TypedValue to Object")) + }) + }) +}) + +type mockTypeConverter struct { + objectToTypedResult *typed.TypedValue + objectToTypedError error + + typedToObjectResult runtime.Object + typedToObjectError error +} + +func (m *mockTypeConverter) ObjectToTyped(r runtime.Object, o ...typed.ValidationOptions) (*typed.TypedValue, error) { + return m.objectToTypedResult, m.objectToTypedError +} + +func (m *mockTypeConverter) TypedToObject(v *typed.TypedValue) (runtime.Object, error) { + return m.typedToObjectResult, m.typedToObjectError +} diff --git a/pkg/client/fake/versioned_tracker.go b/pkg/client/fake/versioned_tracker.go new file mode 100644 index 0000000000..bc1eaeb951 --- /dev/null +++ b/pkg/client/fake/versioned_tracker.go @@ -0,0 +1,361 @@ +/* +Copyright 2025 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "bytes" + "errors" + "fmt" + "runtime/debug" + "strconv" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/managedfields" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/testing" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +var _ testing.ObjectTracker = (*versionedTracker)(nil) + +type versionedTracker struct { + upstream testing.ObjectTracker + scheme *runtime.Scheme + withStatusSubresource sets.Set[schema.GroupVersionKind] + usesFieldManagedObjectTracker bool +} + +func (t versionedTracker) Add(obj runtime.Object) error { + var objects []runtime.Object + if meta.IsListType(obj) { + var err error + objects, err = meta.ExtractList(obj) + if err != nil { + return err + } + } else { + objects = []runtime.Object{obj} + } + for _, obj := range objects { + accessor, err := meta.Accessor(obj) + if err != nil { + return fmt.Errorf("failed to get accessor for object: %w", err) + } + if accessor.GetDeletionTimestamp() != nil && len(accessor.GetFinalizers()) == 0 { + return fmt.Errorf("refusing to create obj %s with metadata.deletionTimestamp but no finalizers", 
accessor.GetName()) + } + if accessor.GetResourceVersion() == "" { + // We use a "magic" value of 999 here because this field + // is parsed as uint and 0 is already used in Update. + // As we can't go lower, go very high instead so this can + // be recognized + accessor.SetResourceVersion(trackerAddResourceVersion) + } + + obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj) + if err != nil { + return err + } + + // If the fieldManager can not decode fields, it will just silently clear them. This is pretty + // much guaranteed not to be what someone that initializes a fake client with objects that + // have them set wants, so validate them here. + // Ref https://github.com/kubernetes/kubernetes/blob/a956ef4862993b825bcd524a19260192ff1da72d/staging/src/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go#L105 + if t.usesFieldManagedObjectTracker { + if err := managedfields.ValidateManagedFields(accessor.GetManagedFields()); err != nil { + return fmt.Errorf("invalid managedFields on %T: %w", obj, err) + } + } + if err := t.upstream.Add(obj); err != nil { + return err + } + } + + return nil +} + +func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.CreateOptions) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return fmt.Errorf("failed to get accessor for object: %w", err) + } + if accessor.GetName() == "" { + gvk, _ := apiutil.GVKForObject(obj, t.scheme) + return apierrors.NewInvalid( + gvk.GroupKind(), + accessor.GetName(), + field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")}) + } + if accessor.GetResourceVersion() != "" { + return apierrors.NewBadRequest("resourceVersion can not be set for Create requests") + } + accessor.SetResourceVersion("1") + obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj) + if err != nil { + return err + } + if err := t.upstream.Create(gvr, obj, ns, opts...); err != nil { + 
accessor.SetResourceVersion("") + return err + } + + return nil +} + +func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.UpdateOptions) error { + updateOpts, err := getSingleOrZeroOptions(opts) + if err != nil { + return err + } + + return t.update(gvr, obj, ns, false, false, updateOpts) +} + +func (t versionedTracker) update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, isStatus, deleting bool, opts metav1.UpdateOptions) error { + gvk, err := apiutil.GVKForObject(obj, t.scheme) + if err != nil { + return err + } + obj, needsCreate, err := t.updateObject(gvr, gvk, obj, ns, isStatus, deleting, allowsCreateOnUpdate(gvk), opts.DryRun) + if err != nil { + return err + } + + if needsCreate { + opts := metav1.CreateOptions{DryRun: opts.DryRun, FieldManager: opts.FieldManager} + return t.Create(gvr, obj, ns, opts) + } + + if obj == nil { // Object was deleted in updateObject + return nil + } + + if u, unstructured := obj.(*unstructured.Unstructured); unstructured { + u.SetGroupVersionKind(gvk) + } + + return t.upstream.Update(gvr, obj, ns, opts) +} + +func (t versionedTracker) Patch(gvr schema.GroupVersionResource, obj runtime.Object, ns string, opts ...metav1.PatchOptions) error { + patchOptions, err := getSingleOrZeroOptions(opts) + if err != nil { + return err + } + + gvk, err := apiutil.GVKForObject(obj, t.scheme) + if err != nil { + return err + } + + // We apply patches using a client-go reaction that ends up calling the trackers Patch. As we can't change + // that reaction, we use the callstack to figure out if this originated from the status client. 
+ isStatus := bytes.Contains(debug.Stack(), []byte("sigs.k8s.io/controller-runtime/pkg/client/fake.(*fakeSubResourceClient).statusPatch")) + + obj, needsCreate, err := t.updateObject(gvr, gvk, obj, ns, isStatus, false, allowsCreateOnUpdate(gvk), patchOptions.DryRun) + if err != nil { + return err + } + if needsCreate { + opts := metav1.CreateOptions{DryRun: patchOptions.DryRun, FieldManager: patchOptions.FieldManager} + return t.Create(gvr, obj, ns, opts) + } + + if obj == nil { // Object was deleted in updateObject + return nil + } + + return t.upstream.Patch(gvr, obj, ns, patchOptions) +} + +// updateObject performs a number of validations and changes related to +// object updates, such as checking and updating the resourceVersion. +func (t versionedTracker) updateObject( + gvr schema.GroupVersionResource, + gvk schema.GroupVersionKind, + obj runtime.Object, + ns string, + isStatus bool, + deleting bool, + allowCreateOnUpdate bool, + dryRun []string, +) (result runtime.Object, needsCreate bool, _ error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return nil, false, fmt.Errorf("failed to get accessor for object: %w", err) + } + + if accessor.GetName() == "" { + return nil, false, apierrors.NewInvalid( + gvk.GroupKind(), + accessor.GetName(), + field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")}) + } + + oldObject, err := t.Get(gvr, ns, accessor.GetName()) + if err != nil { + // If the resource is not found and the resource allows create on update, issue a + // create instead. + if apierrors.IsNotFound(err) && allowCreateOnUpdate { + // Pass this info to the caller rather than create, because in the SSA case it + // must be created by calling Apply in the upstream tracker, not Create. + // This is because SSA considers Apply and Non-Apply operations to be different + // even when they use the same fieldManager. This behavior is also observable + // with a real Kubernetes apiserver. 
+ // + // Ref https://kubernetes.slack.com/archives/C0EG7JC6T/p1757868204458989?thread_ts=1757808656.002569&cid=C0EG7JC6T + return obj, true, nil + } + return obj, false, err + } + + if t.withStatusSubresource.Has(gvk) { + if isStatus { // copy everything but status and metadata.ResourceVersion from original object + if err := copyStatusFrom(obj, oldObject); err != nil { + return nil, false, fmt.Errorf("failed to copy non-status field for object with status subresouce: %w", err) + } + passedRV := accessor.GetResourceVersion() + if err := copyFrom(oldObject, obj); err != nil { + return nil, false, fmt.Errorf("failed to restore non-status fields: %w", err) + } + accessor.SetResourceVersion(passedRV) + } else { // copy status from original object + if err := copyStatusFrom(oldObject, obj); err != nil { + return nil, false, fmt.Errorf("failed to copy the status for object with status subresource: %w", err) + } + } + } else if isStatus { + return nil, false, apierrors.NewNotFound(gvr.GroupResource(), accessor.GetName()) + } + + oldAccessor, err := meta.Accessor(oldObject) + if err != nil { + return nil, false, err + } + + // If the new object does not have the resource version set and it allows unconditional update, + // default it to the resource version of the existing resource + if accessor.GetResourceVersion() == "" { + switch { + case allowsUnconditionalUpdate(gvk): + accessor.SetResourceVersion(oldAccessor.GetResourceVersion()) + // This is needed because if the patch explicitly sets the RV to null, the client-go reaction we use + // to apply it and whose output we process here will have it unset. It is not clear why the Kubernetes + // apiserver accepts such a patch, but it does so we just copy that behavior. + // Kubernetes apiserver behavior can be checked like this: + // `kubectl patch configmap foo --patch '{"metadata":{"annotations":{"foo":"bar"},"resourceVersion":null}}' -v=9` + case bytes. 
+ Contains(debug.Stack(), []byte("sigs.k8s.io/controller-runtime/pkg/client/fake.(*fakeClient).Patch")): + // We apply patches using a client-go reaction that ends up calling the trackers Update. As we can't change + // that reaction, we use the callstack to figure out if this originated from the "fakeClient.Patch" func. + accessor.SetResourceVersion(oldAccessor.GetResourceVersion()) + } + } + + if accessor.GetResourceVersion() != oldAccessor.GetResourceVersion() { + return nil, false, apierrors.NewConflict(gvr.GroupResource(), accessor.GetName(), errors.New("object was modified")) + } + if oldAccessor.GetResourceVersion() == "" { + oldAccessor.SetResourceVersion("0") + } + intResourceVersion, err := strconv.ParseUint(oldAccessor.GetResourceVersion(), 10, 64) + if err != nil { + return nil, false, fmt.Errorf("can not convert resourceVersion %q to int: %w", oldAccessor.GetResourceVersion(), err) + } + intResourceVersion++ + accessor.SetResourceVersion(strconv.FormatUint(intResourceVersion, 10)) + + if !deleting && !deletionTimestampEqual(accessor, oldAccessor) { + return nil, false, fmt.Errorf("error: Unable to edit %s: metadata.deletionTimestamp field is immutable", accessor.GetName()) + } + + if !accessor.GetDeletionTimestamp().IsZero() && len(accessor.GetFinalizers()) == 0 { + return nil, false, t.Delete(gvr, accessor.GetNamespace(), accessor.GetName(), metav1.DeleteOptions{DryRun: dryRun}) + } + + obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj) + return obj, false, err +} + +func (t versionedTracker) Apply(gvr schema.GroupVersionResource, applyConfiguration runtime.Object, ns string, opts ...metav1.PatchOptions) error { + patchOptions, err := getSingleOrZeroOptions(opts) + if err != nil { + return err + } + gvk, err := apiutil.GVKForObject(applyConfiguration, t.scheme) + if err != nil { + return err + } + isStatus := bytes.Contains(debug.Stack(), []byte("sigs.k8s.io/controller-runtime/pkg/client/fake.(*fakeSubResourceClient).statusPatch")) + + 
applyConfiguration, needsCreate, err := t.updateObject(gvr, gvk, applyConfiguration, ns, isStatus, false, true, patchOptions.DryRun) + if err != nil { + return err + } + + if needsCreate { + // https://github.com/kubernetes/kubernetes/blob/81affffa1b8d8079836f4cac713ea8d1b2bbf10f/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go#L606 + accessor, err := meta.Accessor(applyConfiguration) + if err != nil { + return fmt.Errorf("failed to get accessor for object: %w", err) + } + if accessor.GetUID() != "" { + return apierrors.NewConflict(gvr.GroupResource(), accessor.GetName(), fmt.Errorf("uid mismatch: the provided object specified uid %s, and no existing object was found", accessor.GetUID())) + } + + if t.withStatusSubresource.Has(gvk) { + // Clear out status for create, for update this is handled in updateObject + if err := copyStatusFrom(&unstructured.Unstructured{}, applyConfiguration); err != nil { + return err + } + } + } + + if applyConfiguration == nil { // Object was deleted in updateObject + return nil + } + + if isStatus { + // We restore everything but status from the tracker where we don't put GVK + // into the object but it must be set for the ManagedFieldsObjectTracker + applyConfiguration.GetObjectKind().SetGroupVersionKind(gvk) + } + return t.upstream.Apply(gvr, applyConfiguration, ns, opts...) +} + +func (t versionedTracker) Delete(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.DeleteOptions) error { + return t.upstream.Delete(gvr, ns, name, opts...) +} + +func (t versionedTracker) Get(gvr schema.GroupVersionResource, ns, name string, opts ...metav1.GetOptions) (runtime.Object, error) { + return t.upstream.Get(gvr, ns, name, opts...) +} + +func (t versionedTracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string, opts ...metav1.ListOptions) (runtime.Object, error) { + return t.upstream.List(gvr, gvk, ns, opts...) 
+} + +func (t versionedTracker) Watch(gvr schema.GroupVersionResource, ns string, opts ...metav1.ListOptions) (watch.Interface, error) { + return t.upstream.Watch(gvr, ns, opts...) +} diff --git a/pkg/client/fieldowner.go b/pkg/client/fieldowner.go new file mode 100644 index 0000000000..5d9437ba91 --- /dev/null +++ b/pkg/client/fieldowner.go @@ -0,0 +1,114 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// WithFieldOwner wraps a Client and adds the fieldOwner as the field +// manager to all write requests from this client. If additional [FieldOwner] +// options are specified on methods of this client, the value specified here +// will be overridden. +func WithFieldOwner(c Client, fieldOwner string) Client { + return &clientWithFieldManager{ + owner: fieldOwner, + c: c, + Reader: c, + } +} + +type clientWithFieldManager struct { + owner string + c Client + Reader +} + +func (f *clientWithFieldManager) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + return f.c.Create(ctx, obj, append([]CreateOption{FieldOwner(f.owner)}, opts...)...) +} + +func (f *clientWithFieldManager) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return f.c.Update(ctx, obj, append([]UpdateOption{FieldOwner(f.owner)}, opts...)...) 
+} + +func (f *clientWithFieldManager) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return f.c.Patch(ctx, obj, patch, append([]PatchOption{FieldOwner(f.owner)}, opts...)...) +} + +func (f *clientWithFieldManager) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error { + return f.c.Apply(ctx, obj, append([]ApplyOption{FieldOwner(f.owner)}, opts...)...) +} + +func (f *clientWithFieldManager) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + return f.c.Delete(ctx, obj, opts...) +} + +func (f *clientWithFieldManager) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + return f.c.DeleteAllOf(ctx, obj, opts...) +} + +func (f *clientWithFieldManager) Scheme() *runtime.Scheme { return f.c.Scheme() } +func (f *clientWithFieldManager) RESTMapper() meta.RESTMapper { return f.c.RESTMapper() } +func (f *clientWithFieldManager) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return f.c.GroupVersionKindFor(obj) +} +func (f *clientWithFieldManager) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return f.c.IsObjectNamespaced(obj) +} + +func (f *clientWithFieldManager) Status() StatusWriter { + return &subresourceClientWithFieldOwner{ + owner: f.owner, + subresourceWriter: f.c.Status(), + } +} + +func (f *clientWithFieldManager) SubResource(subresource string) SubResourceClient { + c := f.c.SubResource(subresource) + return &subresourceClientWithFieldOwner{ + owner: f.owner, + subresourceWriter: c, + SubResourceReader: c, + } +} + +type subresourceClientWithFieldOwner struct { + owner string + subresourceWriter SubResourceWriter + SubResourceReader +} + +func (f *subresourceClientWithFieldOwner) Create(ctx context.Context, obj Object, subresource Object, opts ...SubResourceCreateOption) error { + return f.subresourceWriter.Create(ctx, obj, subresource, append([]SubResourceCreateOption{FieldOwner(f.owner)}, opts...)...) 
+} + +func (f *subresourceClientWithFieldOwner) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { + return f.subresourceWriter.Update(ctx, obj, append([]SubResourceUpdateOption{FieldOwner(f.owner)}, opts...)...) +} + +func (f *subresourceClientWithFieldOwner) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { + return f.subresourceWriter.Patch(ctx, obj, patch, append([]SubResourcePatchOption{FieldOwner(f.owner)}, opts...)...) +} + +func (f *subresourceClientWithFieldOwner) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...SubResourceApplyOption) error { + return f.subresourceWriter.Apply(ctx, obj, append([]SubResourceApplyOption{FieldOwner(f.owner)}, opts...)...) +} diff --git a/pkg/client/fieldowner_test.go b/pkg/client/fieldowner_test.go new file mode 100644 index 0000000000..069abbc115 --- /dev/null +++ b/pkg/client/fieldowner_test.go @@ -0,0 +1,180 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client_test + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + corev1applyconfigurations "k8s.io/client-go/applyconfigurations/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" +) + +func TestWithFieldOwner(t *testing.T) { + calls := 0 + fakeClient := testClient(t, "custom-field-mgr", func() { calls++ }) + wrappedClient := client.WithFieldOwner(fakeClient, "custom-field-mgr") + + ctx := t.Context() + dummyObj := &corev1.Namespace{} + dummyObjectAC := corev1applyconfigurations.Namespace(dummyObj.Name) + + _ = wrappedClient.Create(ctx, dummyObj) + _ = wrappedClient.Update(ctx, dummyObj) + _ = wrappedClient.Patch(ctx, dummyObj, nil) + _ = wrappedClient.Apply(ctx, dummyObjectAC) + _ = wrappedClient.Status().Create(ctx, dummyObj, dummyObj) + _ = wrappedClient.Status().Update(ctx, dummyObj) + _ = wrappedClient.Status().Patch(ctx, dummyObj, nil) + _ = wrappedClient.Status().Apply(ctx, dummyObjectAC) + _ = wrappedClient.SubResource("some-subresource").Create(ctx, dummyObj, dummyObj) + _ = wrappedClient.SubResource("some-subresource").Update(ctx, dummyObj) + _ = wrappedClient.SubResource("some-subresource").Patch(ctx, dummyObj, nil) + _ = wrappedClient.SubResource("some-subresource").Apply(ctx, dummyObjectAC) + + if expectedCalls := 12; calls != expectedCalls { + t.Fatalf("wrong number of calls to assertions: expected=%d; got=%d", expectedCalls, calls) + } +} + +func TestWithFieldOwnerOverridden(t *testing.T) { + calls := 0 + + fakeClient := testClient(t, "new-field-manager", func() { calls++ }) + wrappedClient := client.WithFieldOwner(fakeClient, "old-field-manager") + + ctx := t.Context() + dummyObj := &corev1.Namespace{} + dummyObjectAC := corev1applyconfigurations.Namespace(dummyObj.Name) + + _ = wrappedClient.Create(ctx, dummyObj, client.FieldOwner("new-field-manager")) + _ = 
wrappedClient.Update(ctx, dummyObj, client.FieldOwner("new-field-manager")) + _ = wrappedClient.Patch(ctx, dummyObj, nil, client.FieldOwner("new-field-manager")) + _ = wrappedClient.Apply(ctx, dummyObjectAC, client.FieldOwner("new-field-manager")) + _ = wrappedClient.Status().Create(ctx, dummyObj, dummyObj, client.FieldOwner("new-field-manager")) + _ = wrappedClient.Status().Update(ctx, dummyObj, client.FieldOwner("new-field-manager")) + _ = wrappedClient.Status().Patch(ctx, dummyObj, nil, client.FieldOwner("new-field-manager")) + _ = wrappedClient.Status().Apply(ctx, dummyObjectAC, client.FieldOwner("new-field-manager")) + _ = wrappedClient.SubResource("some-subresource").Create(ctx, dummyObj, dummyObj, client.FieldOwner("new-field-manager")) + _ = wrappedClient.SubResource("some-subresource").Update(ctx, dummyObj, client.FieldOwner("new-field-manager")) + _ = wrappedClient.SubResource("some-subresource").Patch(ctx, dummyObj, nil, client.FieldOwner("new-field-manager")) + _ = wrappedClient.SubResource("some-subresource").Apply(ctx, dummyObjectAC, client.FieldOwner("new-field-manager")) + + if expectedCalls := 12; calls != expectedCalls { + t.Fatalf("wrong number of calls to assertions: expected=%d; got=%d", expectedCalls, calls) + } +} + +// testClient is a helper function that checks if calls have the expected field manager, +// and calls the callback function on each intercepted call. 
+func testClient(t *testing.T, expectedFieldManager string, callback func()) client.Client { + // TODO: we could use the dummyClient in interceptor pkg if we move it to an internal pkg + return fake.NewClientBuilder().WithInterceptorFuncs(interceptor.Funcs{ + Create: func(ctx context.Context, c client.WithWatch, obj client.Object, opts ...client.CreateOption) error { + callback() + out := &client.CreateOptions{} + for _, f := range opts { + f.ApplyToCreate(out) + } + if got := out.FieldManager; expectedFieldManager != got { + t.Fatalf("wrong field manager: expected=%q; got=%q", expectedFieldManager, got) + } + return nil + }, + Update: func(ctx context.Context, c client.WithWatch, obj client.Object, opts ...client.UpdateOption) error { + callback() + out := &client.UpdateOptions{} + for _, f := range opts { + f.ApplyToUpdate(out) + } + if got := out.FieldManager; expectedFieldManager != got { + t.Fatalf("wrong field manager: expected=%q; got=%q", expectedFieldManager, got) + } + return nil + }, + Patch: func(ctx context.Context, c client.WithWatch, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + callback() + out := &client.PatchOptions{} + for _, f := range opts { + f.ApplyToPatch(out) + } + if got := out.FieldManager; expectedFieldManager != got { + t.Fatalf("wrong field manager: expected=%q; got=%q", expectedFieldManager, got) + } + return nil + }, + SubResourceCreate: func(ctx context.Context, c client.Client, subResourceName string, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { + callback() + out := &client.SubResourceCreateOptions{} + for _, f := range opts { + f.ApplyToSubResourceCreate(out) + } + if got := out.FieldManager; expectedFieldManager != got { + t.Fatalf("wrong field manager: expected=%q; got=%q", expectedFieldManager, got) + } + return nil + }, + SubResourceUpdate: func(ctx context.Context, c client.Client, subResourceName string, obj client.Object, opts 
...client.SubResourceUpdateOption) error { + callback() + out := &client.SubResourceUpdateOptions{} + for _, f := range opts { + f.ApplyToSubResourceUpdate(out) + } + if got := out.FieldManager; expectedFieldManager != got { + t.Fatalf("wrong field manager: expected=%q; got=%q", expectedFieldManager, got) + } + return nil + }, + SubResourcePatch: func(ctx context.Context, c client.Client, subResourceName string, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + callback() + out := &client.SubResourcePatchOptions{} + for _, f := range opts { + f.ApplyToSubResourcePatch(out) + } + if got := out.FieldManager; expectedFieldManager != got { + t.Fatalf("wrong field manager: expected=%q; got=%q", expectedFieldManager, got) + } + return nil + }, + Apply: func(ctx context.Context, c client.WithWatch, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error { + callback() + out := &client.ApplyOptions{} + for _, f := range opts { + f.ApplyToApply(out) + } + if got := out.FieldManager; expectedFieldManager != got { + t.Fatalf("wrong field manager: expected=%q; got=%q", expectedFieldManager, got) + } + return nil + }, + SubResourceApply: func(ctx context.Context, c client.Client, subResourceName string, obj runtime.ApplyConfiguration, opts ...client.SubResourceApplyOption) error { + callback() + out := &client.SubResourceApplyOptions{} + for _, f := range opts { + f.ApplyToSubResourceApply(out) + } + if got := out.FieldManager; expectedFieldManager != got { + t.Fatalf("wrong field manager: expected=%q; got=%q", expectedFieldManager, got) + } + return nil + }, + }).Build() +} diff --git a/pkg/client/fieldvalidation.go b/pkg/client/fieldvalidation.go new file mode 100644 index 0000000000..b0f660854e --- /dev/null +++ b/pkg/client/fieldvalidation.go @@ -0,0 +1,117 @@ +/* +Copyright 2024 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// WithFieldValidation wraps a Client and configures field validation, by +// default, for all write requests from this client. Users can override field +// validation for individual write requests. +// +// This wrapper has no effect on apply requests, as they do not support a +// custom fieldValidation setting, it is always strict. +func WithFieldValidation(c Client, validation FieldValidation) Client { + return &clientWithFieldValidation{ + validation: validation, + client: c, + Reader: c, + } +} + +type clientWithFieldValidation struct { + validation FieldValidation + client Client + Reader +} + +func (c *clientWithFieldValidation) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + return c.client.Create(ctx, obj, append([]CreateOption{c.validation}, opts...)...) +} + +func (c *clientWithFieldValidation) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return c.client.Update(ctx, obj, append([]UpdateOption{c.validation}, opts...)...) +} + +func (c *clientWithFieldValidation) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return c.client.Patch(ctx, obj, patch, append([]PatchOption{c.validation}, opts...)...) 
+} + +func (c *clientWithFieldValidation) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error { + return c.client.Apply(ctx, obj, opts...) +} + +func (c *clientWithFieldValidation) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + return c.client.Delete(ctx, obj, opts...) +} + +func (c *clientWithFieldValidation) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + return c.client.DeleteAllOf(ctx, obj, opts...) +} + +func (c *clientWithFieldValidation) Scheme() *runtime.Scheme { return c.client.Scheme() } +func (c *clientWithFieldValidation) RESTMapper() meta.RESTMapper { return c.client.RESTMapper() } +func (c *clientWithFieldValidation) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.client.GroupVersionKindFor(obj) +} + +func (c *clientWithFieldValidation) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.client.IsObjectNamespaced(obj) +} + +func (c *clientWithFieldValidation) Status() StatusWriter { + return &subresourceClientWithFieldValidation{ + validation: c.validation, + subresourceWriter: c.client.Status(), + } +} + +func (c *clientWithFieldValidation) SubResource(subresource string) SubResourceClient { + srClient := c.client.SubResource(subresource) + return &subresourceClientWithFieldValidation{ + validation: c.validation, + subresourceWriter: srClient, + SubResourceReader: srClient, + } +} + +type subresourceClientWithFieldValidation struct { + validation FieldValidation + subresourceWriter SubResourceWriter + SubResourceReader +} + +func (c *subresourceClientWithFieldValidation) Create(ctx context.Context, obj Object, subresource Object, opts ...SubResourceCreateOption) error { + return c.subresourceWriter.Create(ctx, obj, subresource, append([]SubResourceCreateOption{c.validation}, opts...)...) 
+} + +func (c *subresourceClientWithFieldValidation) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { + return c.subresourceWriter.Update(ctx, obj, append([]SubResourceUpdateOption{c.validation}, opts...)...) +} + +func (c *subresourceClientWithFieldValidation) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { + return c.subresourceWriter.Patch(ctx, obj, patch, append([]SubResourcePatchOption{c.validation}, opts...)...) +} + +func (c *subresourceClientWithFieldValidation) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...SubResourceApplyOption) error { + return c.subresourceWriter.Apply(ctx, obj, opts...) +} diff --git a/pkg/client/fieldvalidation_test.go b/pkg/client/fieldvalidation_test.go new file mode 100644 index 0000000000..6e6e9e5d17 --- /dev/null +++ b/pkg/client/fieldvalidation_test.go @@ -0,0 +1,297 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client_test + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + corev1applyconfigurations "k8s.io/client-go/applyconfigurations/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" +) + +var _ = Describe("ClientWithFieldValidation", func() { + It("should return errors for invalid fields when using strict validation", func(ctx SpecContext) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + wrappedClient := client.WithFieldValidation(cl, metav1.FieldValidationStrict) + + baseNode := &unstructured.Unstructured{} + baseNode.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Kind: "Node", + Version: "v1", + }) + baseNode.SetName("client-with-field-validation-test-node") + + validNode := baseNode.DeepCopy() + patch := client.MergeFrom(validNode.DeepCopy()) + + invalidNode := baseNode.DeepCopy() + Expect(unstructured.SetNestedField(invalidNode.Object, "value", "spec", "invalidField")).To(Succeed()) + + invalidStatusNode := baseNode.DeepCopy() + Expect(unstructured.SetNestedField(invalidStatusNode.Object, "value", "status", "invalidStatusField")).To(Succeed()) + + err = wrappedClient.Create(ctx, invalidNode) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("strict decoding error: unknown field \"spec.invalidField\"")) + + err = wrappedClient.Create(ctx, validNode) + Expect(err).ToNot(HaveOccurred()) + + err = wrappedClient.Update(ctx, invalidNode) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("strict decoding error: unknown field \"spec.invalidField\"")) + + err = wrappedClient.Patch(ctx, invalidNode, patch) + Expect(err).To(HaveOccurred()) + 
Expect(err.Error()).To(ContainSubstring("strict decoding error: unknown field \"spec.invalidField\"")) + + // Status.Create is not supported on Nodes + + err = wrappedClient.Status().Update(ctx, invalidStatusNode) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("strict decoding error: unknown field \"status.invalidStatusField\"")) + + err = wrappedClient.Status().Patch(ctx, invalidStatusNode, patch) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("strict decoding error: unknown field \"status.invalidStatusField\"")) + + // Status.Create is not supported on Nodes + + err = wrappedClient.SubResource("status").Update(ctx, invalidStatusNode) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("strict decoding error: unknown field \"status.invalidStatusField\"")) + + err = wrappedClient.SubResource("status").Patch(ctx, invalidStatusNode, patch) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("strict decoding error: unknown field \"status.invalidStatusField\"")) + + invalidApplyConfig := client.ApplyConfigurationFromUnstructured(invalidStatusNode) + err = wrappedClient.Status().Apply(ctx, invalidApplyConfig, client.FieldOwner("test-owner")) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("field not declared in schema")) + + err = wrappedClient.SubResource("status").Apply(ctx, invalidApplyConfig, client.FieldOwner("test-owner")) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("field not declared in schema")) + }) +}) + +func TestWithStrictFieldValidation(t *testing.T) { + calls := 0 + fakeClient := testFieldValidationClient(t, metav1.FieldValidationStrict, func() { calls++ }) + wrappedClient := client.WithFieldValidation(fakeClient, metav1.FieldValidationStrict) + + ctx := t.Context() + dummyObj := &corev1.Namespace{} + + _ = wrappedClient.Create(ctx, dummyObj) + _ = wrappedClient.Update(ctx, dummyObj) + _ = 
wrappedClient.Apply(ctx, corev1applyconfigurations.ConfigMap("foo", "bar")) + _ = wrappedClient.Patch(ctx, dummyObj, nil) + _ = wrappedClient.Status().Create(ctx, dummyObj, dummyObj) + _ = wrappedClient.Status().Update(ctx, dummyObj) + _ = wrappedClient.Status().Patch(ctx, dummyObj, nil) + _ = wrappedClient.Status().Apply(ctx, corev1applyconfigurations.Namespace(""), nil) + _ = wrappedClient.SubResource("some-subresource").Create(ctx, dummyObj, dummyObj) + _ = wrappedClient.SubResource("some-subresource").Update(ctx, dummyObj) + _ = wrappedClient.SubResource("some-subresource").Patch(ctx, dummyObj, nil) + _ = wrappedClient.SubResource("some-subresource").Apply(ctx, corev1applyconfigurations.Namespace(""), nil) + + if expectedCalls := 12; calls != expectedCalls { + t.Fatalf("wrong number of calls to assertions: expected=%d; got=%d", expectedCalls, calls) + } +} + +func TestWithStrictFieldValidationOverridden(t *testing.T) { + calls := 0 + + fakeClient := testFieldValidationClient(t, metav1.FieldValidationWarn, func() { calls++ }) + wrappedClient := client.WithFieldValidation(fakeClient, metav1.FieldValidationStrict) + + ctx := t.Context() + dummyObj := &corev1.Namespace{} + + _ = wrappedClient.Create(ctx, dummyObj, client.FieldValidation(metav1.FieldValidationWarn)) + _ = wrappedClient.Update(ctx, dummyObj, client.FieldValidation(metav1.FieldValidationWarn)) + _ = wrappedClient.Patch(ctx, dummyObj, nil, client.FieldValidation(metav1.FieldValidationWarn)) + _ = wrappedClient.Status().Create(ctx, dummyObj, dummyObj, client.FieldValidation(metav1.FieldValidationWarn)) + _ = wrappedClient.Status().Update(ctx, dummyObj, client.FieldValidation(metav1.FieldValidationWarn)) + _ = wrappedClient.Status().Patch(ctx, dummyObj, nil, client.FieldValidation(metav1.FieldValidationWarn)) + _ = wrappedClient.SubResource("some-subresource").Create(ctx, dummyObj, dummyObj, client.FieldValidation(metav1.FieldValidationWarn)) + _ = 
wrappedClient.SubResource("some-subresource").Update(ctx, dummyObj, client.FieldValidation(metav1.FieldValidationWarn)) + _ = wrappedClient.SubResource("some-subresource").Patch(ctx, dummyObj, nil, client.FieldValidation(metav1.FieldValidationWarn)) + + if expectedCalls := 9; calls != expectedCalls { + t.Fatalf("wrong number of calls to assertions: expected=%d; got=%d", expectedCalls, calls) + } +} + +// testFieldValidationClient is a helper function that checks if calls have the expected field validation, +// and calls the callback function on each intercepted call. +func testFieldValidationClient(t *testing.T, expectedFieldValidation string, callback func()) client.Client { + // TODO: we could use the dummyClient in interceptor pkg if we move it to an internal pkg + return fake.NewClientBuilder().WithInterceptorFuncs(interceptor.Funcs{ + Create: func(ctx context.Context, c client.WithWatch, obj client.Object, opts ...client.CreateOption) error { + callback() + out := &client.CreateOptions{} + for _, f := range opts { + f.ApplyToCreate(out) + } + if got := out.FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + + if got := out.AsCreateOptions().FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + + co := &client.CreateOptions{} + out.ApplyToCreate(co) + if got := co.FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + return nil + }, + Update: func(ctx context.Context, c client.WithWatch, obj client.Object, opts ...client.UpdateOption) error { + callback() + out := &client.UpdateOptions{} + for _, f := range opts { + f.ApplyToUpdate(out) + } + if got := out.FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + 
} + + if got := out.AsUpdateOptions().FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + + co := &client.UpdateOptions{} + out.ApplyToUpdate(co) + if got := co.FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + return nil + }, + Apply: func(ctx context.Context, client client.WithWatch, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error { + callback() + return nil + }, + Patch: func(ctx context.Context, c client.WithWatch, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + callback() + out := &client.PatchOptions{} + for _, f := range opts { + f.ApplyToPatch(out) + } + if got := out.FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + + if got := out.AsPatchOptions().FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + + co := &client.PatchOptions{} + out.ApplyToPatch(co) + if got := co.FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + return nil + }, + SubResourceCreate: func(ctx context.Context, c client.Client, subResourceName string, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { + callback() + out := &client.SubResourceCreateOptions{} + for _, f := range opts { + f.ApplyToSubResourceCreate(out) + } + if got := out.FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + + if got := out.AsCreateOptions().FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, 
got) + } + + co := &client.CreateOptions{} + out.ApplyToCreate(co) + if got := co.FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + return nil + }, + SubResourceUpdate: func(ctx context.Context, c client.Client, subResourceName string, obj client.Object, opts ...client.SubResourceUpdateOption) error { + callback() + out := &client.SubResourceUpdateOptions{} + for _, f := range opts { + f.ApplyToSubResourceUpdate(out) + } + if got := out.FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + + if got := out.AsUpdateOptions().FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + + co := &client.UpdateOptions{} + out.ApplyToUpdate(co) + if got := co.FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + return nil + }, + SubResourcePatch: func(ctx context.Context, c client.Client, subResourceName string, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + callback() + out := &client.SubResourcePatchOptions{} + for _, f := range opts { + f.ApplyToSubResourcePatch(out) + } + if got := out.FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + + if got := out.AsPatchOptions().FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + + co := &client.PatchOptions{} + out.ApplyToPatch(co) + if got := co.FieldValidation; expectedFieldValidation != got { + t.Fatalf("wrong field validation: expected=%q; got=%q", expectedFieldValidation, got) + } + return nil + }, + SubResourceApply: func(ctx context.Context, c 
client.Client, subResourceName string, obj runtime.ApplyConfiguration, opts ...client.SubResourceApplyOption) error { + callback() + return nil + }, + }).Build() +} diff --git a/pkg/client/interceptor/intercept.go b/pkg/client/interceptor/intercept.go new file mode 100644 index 0000000000..b98af1a693 --- /dev/null +++ b/pkg/client/interceptor/intercept.go @@ -0,0 +1,183 @@ +package interceptor + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Funcs contains functions that are called instead of the underlying client's methods. +type Funcs struct { + Get func(ctx context.Context, client client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error + List func(ctx context.Context, client client.WithWatch, list client.ObjectList, opts ...client.ListOption) error + Create func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.CreateOption) error + Delete func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteOption) error + DeleteAllOf func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteAllOfOption) error + Update func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.UpdateOption) error + Patch func(ctx context.Context, client client.WithWatch, obj client.Object, patch client.Patch, opts ...client.PatchOption) error + Apply func(ctx context.Context, client client.WithWatch, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error + Watch func(ctx context.Context, client client.WithWatch, obj client.ObjectList, opts ...client.ListOption) (watch.Interface, error) + SubResource func(client client.WithWatch, subResource string) client.SubResourceClient + SubResourceGet func(ctx context.Context, client client.Client, 
subResourceName string, obj client.Object, subResource client.Object, opts ...client.SubResourceGetOption) error + SubResourceCreate func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error + SubResourceUpdate func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, opts ...client.SubResourceUpdateOption) error + SubResourcePatch func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error + SubResourceApply func(ctx context.Context, client client.Client, subResourceName string, obj runtime.ApplyConfiguration, opts ...client.SubResourceApplyOption) error +} + +// NewClient returns a new interceptor client that calls the functions in funcs instead of the underlying client's methods, if they are not nil. +func NewClient(interceptedClient client.WithWatch, funcs Funcs) client.WithWatch { + return interceptor{ + client: interceptedClient, + funcs: funcs, + } +} + +type interceptor struct { + client client.WithWatch + funcs Funcs +} + +var _ client.WithWatch = &interceptor{} + +func (c interceptor) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.client.GroupVersionKindFor(obj) +} + +func (c interceptor) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.client.IsObjectNamespaced(obj) +} + +func (c interceptor) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + if c.funcs.Get != nil { + return c.funcs.Get(ctx, c.client, key, obj, opts...) + } + return c.client.Get(ctx, key, obj, opts...) +} + +func (c interceptor) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + if c.funcs.List != nil { + return c.funcs.List(ctx, c.client, list, opts...) + } + return c.client.List(ctx, list, opts...) 
+} + +func (c interceptor) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + if c.funcs.Create != nil { + return c.funcs.Create(ctx, c.client, obj, opts...) + } + return c.client.Create(ctx, obj, opts...) +} + +func (c interceptor) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + if c.funcs.Delete != nil { + return c.funcs.Delete(ctx, c.client, obj, opts...) + } + return c.client.Delete(ctx, obj, opts...) +} + +func (c interceptor) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + if c.funcs.Update != nil { + return c.funcs.Update(ctx, c.client, obj, opts...) + } + return c.client.Update(ctx, obj, opts...) +} + +func (c interceptor) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + if c.funcs.Patch != nil { + return c.funcs.Patch(ctx, c.client, obj, patch, opts...) + } + return c.client.Patch(ctx, obj, patch, opts...) +} + +func (c interceptor) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error { + if c.funcs.Apply != nil { + return c.funcs.Apply(ctx, c.client, obj, opts...) + } + + return c.client.Apply(ctx, obj, opts...) +} + +func (c interceptor) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + if c.funcs.DeleteAllOf != nil { + return c.funcs.DeleteAllOf(ctx, c.client, obj, opts...) + } + return c.client.DeleteAllOf(ctx, obj, opts...) 
+} + +func (c interceptor) Status() client.SubResourceWriter { + return c.SubResource("status") +} + +func (c interceptor) SubResource(subResource string) client.SubResourceClient { + if c.funcs.SubResource != nil { + return c.funcs.SubResource(c.client, subResource) + } + return subResourceInterceptor{ + subResourceName: subResource, + client: c.client, + funcs: c.funcs, + } +} + +func (c interceptor) Scheme() *runtime.Scheme { + return c.client.Scheme() +} + +func (c interceptor) RESTMapper() meta.RESTMapper { + return c.client.RESTMapper() +} + +func (c interceptor) Watch(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) (watch.Interface, error) { + if c.funcs.Watch != nil { + return c.funcs.Watch(ctx, c.client, obj, opts...) + } + return c.client.Watch(ctx, obj, opts...) +} + +type subResourceInterceptor struct { + subResourceName string + client client.Client + funcs Funcs +} + +var _ client.SubResourceClient = &subResourceInterceptor{} + +func (s subResourceInterceptor) Get(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceGetOption) error { + if s.funcs.SubResourceGet != nil { + return s.funcs.SubResourceGet(ctx, s.client, s.subResourceName, obj, subResource, opts...) + } + return s.client.SubResource(s.subResourceName).Get(ctx, obj, subResource, opts...) +} + +func (s subResourceInterceptor) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { + if s.funcs.SubResourceCreate != nil { + return s.funcs.SubResourceCreate(ctx, s.client, s.subResourceName, obj, subResource, opts...) + } + return s.client.SubResource(s.subResourceName).Create(ctx, obj, subResource, opts...) +} + +func (s subResourceInterceptor) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + if s.funcs.SubResourceUpdate != nil { + return s.funcs.SubResourceUpdate(ctx, s.client, s.subResourceName, obj, opts...) 
+ } + return s.client.SubResource(s.subResourceName).Update(ctx, obj, opts...) +} + +func (s subResourceInterceptor) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + if s.funcs.SubResourcePatch != nil { + return s.funcs.SubResourcePatch(ctx, s.client, s.subResourceName, obj, patch, opts...) + } + return s.client.SubResource(s.subResourceName).Patch(ctx, obj, patch, opts...) +} + +func (s subResourceInterceptor) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...client.SubResourceApplyOption) error { + if s.funcs.SubResourceApply != nil { + return s.funcs.SubResourceApply(ctx, s.client, s.subResourceName, obj, opts...) + } + return s.client.SubResource(s.subResourceName).Apply(ctx, obj, opts...) +} diff --git a/pkg/client/interceptor/intercept_test.go b/pkg/client/interceptor/intercept_test.go new file mode 100644 index 0000000000..fb58dfeac1 --- /dev/null +++ b/pkg/client/interceptor/intercept_test.go @@ -0,0 +1,443 @@ +package interceptor + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("NewClient", func() { + wrappedClient := dummyClient{} + It("should call the provided Get function", func(ctx SpecContext) { + var called bool + client := NewClient(wrappedClient, Funcs{ + Get: func(ctx context.Context, client client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + called = true + return nil + }, + }) + _ = client.Get(ctx, types.NamespacedName{}, nil) + Expect(called).To(BeTrue()) + }) + It("should call the underlying client if the provided Get function is nil", func(ctx SpecContext) { + var called bool + client1 := NewClient(wrappedClient, Funcs{ + Get: func(ctx context.Context, client client.WithWatch, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + called = true + return nil + }, + }) + client2 := NewClient(client1, Funcs{}) + _ = client2.Get(ctx, types.NamespacedName{}, nil) + Expect(called).To(BeTrue()) + }) + It("should call the provided List function", func(ctx SpecContext) { + var called bool + client := NewClient(wrappedClient, Funcs{ + List: func(ctx context.Context, client client.WithWatch, list client.ObjectList, opts ...client.ListOption) error { + called = true + return nil + }, + }) + _ = client.List(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the underlying client if the provided List function is nil", func(ctx SpecContext) { + var called bool + client1 := NewClient(wrappedClient, Funcs{ + List: func(ctx context.Context, client client.WithWatch, list client.ObjectList, opts ...client.ListOption) error { + called = true + return nil + }, + }) + client2 := NewClient(client1, Funcs{}) + _ = client2.List(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the provided Apply function", func(ctx SpecContext) { + var called bool + client := NewClient(wrappedClient, 
Funcs{ + Apply: func(ctx context.Context, client client.WithWatch, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error { + called = true + return nil + }, + }) + _ = client.Apply(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the underlying client if the provided Apply function is nil", func(ctx SpecContext) { + var called bool + client1 := NewClient(wrappedClient, Funcs{ + Apply: func(ctx context.Context, client client.WithWatch, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error { + called = true + return nil + }, + }) + client2 := NewClient(client1, Funcs{}) + _ = client2.Apply(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the provided Create function", func(ctx SpecContext) { + var called bool + client := NewClient(wrappedClient, Funcs{ + Create: func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.CreateOption) error { + called = true + return nil + }, + }) + _ = client.Create(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the underlying client if the provided Create function is nil", func(ctx SpecContext) { + var called bool + client1 := NewClient(wrappedClient, Funcs{ + Create: func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.CreateOption) error { + called = true + return nil + }, + }) + client2 := NewClient(client1, Funcs{}) + _ = client2.Create(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the provided Delete function", func(ctx SpecContext) { + var called bool + client := NewClient(wrappedClient, Funcs{ + Delete: func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteOption) error { + called = true + return nil + }, + }) + _ = client.Delete(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the underlying client if the provided Delete function is nil", func(ctx SpecContext) { + var called bool + client1 := NewClient(wrappedClient, Funcs{ + Delete: 
func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteOption) error { + called = true + return nil + }, + }) + client2 := NewClient(client1, Funcs{}) + _ = client2.Delete(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the provided DeleteAllOf function", func(ctx SpecContext) { + var called bool + client := NewClient(wrappedClient, Funcs{ + DeleteAllOf: func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteAllOfOption) error { + called = true + return nil + }, + }) + _ = client.DeleteAllOf(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the underlying client if the provided DeleteAllOf function is nil", func(ctx SpecContext) { + var called bool + client1 := NewClient(wrappedClient, Funcs{ + DeleteAllOf: func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteAllOfOption) error { + called = true + return nil + }, + }) + client2 := NewClient(client1, Funcs{}) + _ = client2.DeleteAllOf(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the provided Update function", func(ctx SpecContext) { + var called bool + client := NewClient(wrappedClient, Funcs{ + Update: func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.UpdateOption) error { + called = true + return nil + }, + }) + _ = client.Update(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the underlying client if the provided Update function is nil", func(ctx SpecContext) { + var called bool + client1 := NewClient(wrappedClient, Funcs{ + Update: func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.UpdateOption) error { + called = true + return nil + }, + }) + client2 := NewClient(client1, Funcs{}) + _ = client2.Update(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the provided Patch function", func(ctx SpecContext) { + var called bool + client := NewClient(wrappedClient, 
Funcs{ + Patch: func(ctx context.Context, client client.WithWatch, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + called = true + return nil + }, + }) + _ = client.Patch(ctx, nil, nil) + Expect(called).To(BeTrue()) + }) + It("should call the underlying client if the provided Patch function is nil", func(ctx SpecContext) { + var called bool + client1 := NewClient(wrappedClient, Funcs{ + Patch: func(ctx context.Context, client client.WithWatch, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + called = true + return nil + }, + }) + client2 := NewClient(client1, Funcs{}) + _ = client2.Patch(ctx, nil, nil) + Expect(called).To(BeTrue()) + }) + It("should call the provided Watch function", func(ctx SpecContext) { + var called bool + client := NewClient(wrappedClient, Funcs{ + Watch: func(ctx context.Context, client client.WithWatch, obj client.ObjectList, opts ...client.ListOption) (watch.Interface, error) { + called = true + return nil, nil + }, + }) + _, _ = client.Watch(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the underlying client if the provided Watch function is nil", func(ctx SpecContext) { + var called bool + client1 := NewClient(wrappedClient, Funcs{ + Watch: func(ctx context.Context, client client.WithWatch, obj client.ObjectList, opts ...client.ListOption) (watch.Interface, error) { + called = true + return nil, nil + }, + }) + client2 := NewClient(client1, Funcs{}) + _, _ = client2.Watch(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the provided SubResource function", func() { + var called bool + client := NewClient(wrappedClient, Funcs{ + SubResource: func(client client.WithWatch, subResource string) client.SubResourceClient { + called = true + return nil + }, + }) + _ = client.SubResource("") + Expect(called).To(BeTrue()) + }) + It("should call the provided SubResource function with 'status' when calling Status()", func() { + var called bool + client := 
NewClient(wrappedClient, Funcs{ + SubResource: func(client client.WithWatch, subResource string) client.SubResourceClient { + if subResource == "status" { + called = true + } + return nil + }, + }) + _ = client.Status() + Expect(called).To(BeTrue()) + }) +}) + +var _ = Describe("NewSubResourceClient", func() { + c := dummyClient{} + It("should call the provided Get function", func(ctx SpecContext) { + var called bool + c := NewClient(c, Funcs{ + SubResourceGet: func(_ context.Context, client client.Client, subResourceName string, obj, subResource client.Object, opts ...client.SubResourceGetOption) error { + called = true + Expect(subResourceName).To(BeEquivalentTo("foo")) + return nil + }, + }) + _ = c.SubResource("foo").Get(ctx, nil, nil) + Expect(called).To(BeTrue()) + }) + It("should call the underlying client if the provided Get function is nil", func(ctx SpecContext) { + var called bool + client1 := NewClient(c, Funcs{ + SubResourceGet: func(_ context.Context, client client.Client, subResourceName string, obj, subResource client.Object, opts ...client.SubResourceGetOption) error { + called = true + Expect(subResourceName).To(BeEquivalentTo("foo")) + return nil + }, + }) + client2 := NewClient(client1, Funcs{}) + _ = client2.SubResource("foo").Get(ctx, nil, nil) + Expect(called).To(BeTrue()) + }) + It("should call the provided Update function", func(ctx SpecContext) { + var called bool + client := NewClient(c, Funcs{ + SubResourceUpdate: func(_ context.Context, client client.Client, subResourceName string, obj client.Object, opts ...client.SubResourceUpdateOption) error { + called = true + Expect(subResourceName).To(BeEquivalentTo("foo")) + return nil + }, + }) + _ = client.SubResource("foo").Update(ctx, nil, nil) + Expect(called).To(BeTrue()) + }) + It("should call the underlying client if the provided Update function is nil", func(ctx SpecContext) { + var called bool + client1 := NewClient(c, Funcs{ + SubResourceUpdate: func(_ context.Context, client 
client.Client, subResourceName string, obj client.Object, opts ...client.SubResourceUpdateOption) error { + called = true + Expect(subResourceName).To(BeEquivalentTo("foo")) + return nil + }, + }) + client2 := NewClient(client1, Funcs{}) + _ = client2.SubResource("foo").Update(ctx, nil, nil) + Expect(called).To(BeTrue()) + }) + It("should call the provided Patch function", func(ctx SpecContext) { + var called bool + client := NewClient(c, Funcs{ + SubResourcePatch: func(_ context.Context, client client.Client, subResourceName string, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + called = true + Expect(subResourceName).To(BeEquivalentTo("foo")) + return nil + }, + }) + _ = client.SubResource("foo").Patch(ctx, nil, nil) + Expect(called).To(BeTrue()) + }) + It("should call the underlying client if the provided Patch function is nil", func(ctx SpecContext) { + var called bool + client1 := NewClient(c, Funcs{ + SubResourcePatch: func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + called = true + Expect(subResourceName).To(BeEquivalentTo("foo")) + return nil + }, + }) + client2 := NewClient(client1, Funcs{}) + _ = client2.SubResource("foo").Patch(ctx, nil, nil) + Expect(called).To(BeTrue()) + }) + It("should call the provided Create function", func(ctx SpecContext) { + var called bool + client := NewClient(c, Funcs{ + SubResourceCreate: func(_ context.Context, client client.Client, subResourceName string, obj, subResource client.Object, opts ...client.SubResourceCreateOption) error { + called = true + Expect(subResourceName).To(BeEquivalentTo("foo")) + return nil + }, + }) + _ = client.SubResource("foo").Create(ctx, nil, nil) + Expect(called).To(BeTrue()) + }) + It("should call the underlying client if the provided Create function is nil", func(ctx SpecContext) { + var called bool + client1 := NewClient(c, Funcs{ + 
SubResourceCreate: func(_ context.Context, client client.Client, subResourceName string, obj, subResource client.Object, opts ...client.SubResourceCreateOption) error { + called = true + Expect(subResourceName).To(BeEquivalentTo("foo")) + return nil + }, + }) + client2 := NewClient(client1, Funcs{}) + _ = client2.SubResource("foo").Create(ctx, nil, nil) + Expect(called).To(BeTrue()) + }) + It("should call the provided Apply function", func(ctx SpecContext) { + var called bool + client := NewClient(c, Funcs{ + SubResourceApply: func(_ context.Context, client client.Client, subResourceName string, obj runtime.ApplyConfiguration, opts ...client.SubResourceApplyOption) error { + called = true + Expect(subResourceName).To(BeEquivalentTo("foo")) + return nil + }, + }) + _ = client.SubResource("foo").Apply(ctx, nil) + Expect(called).To(BeTrue()) + }) + It("should call the underlying client if the provided Apply function is nil", func(ctx SpecContext) { + var called bool + client1 := NewClient(c, Funcs{ + SubResourceApply: func(_ context.Context, client client.Client, subResourceName string, obj runtime.ApplyConfiguration, opts ...client.SubResourceApplyOption) error { + called = true + Expect(subResourceName).To(BeEquivalentTo("foo")) + return nil + }, + }) + client2 := NewClient(client1, Funcs{}) + _ = client2.SubResource("foo").Apply(ctx, nil) + Expect(called).To(BeTrue()) + }) +}) + +type dummyClient struct{} + +var _ client.WithWatch = &dummyClient{} + +func (d dummyClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + return nil +} + +func (d dummyClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + return nil +} + +func (d dummyClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + return nil +} + +func (d dummyClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + return nil +} + +func (d 
dummyClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + return nil +} + +func (d dummyClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + return nil +} + +func (d dummyClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error { + return nil +} + +func (d dummyClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + return nil +} + +func (d dummyClient) Status() client.SubResourceWriter { + return d.SubResource("status") +} + +func (d dummyClient) SubResource(subResource string) client.SubResourceClient { + return nil +} + +func (d dummyClient) Scheme() *runtime.Scheme { + return nil +} + +func (d dummyClient) RESTMapper() meta.RESTMapper { + return nil +} + +func (d dummyClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return schema.GroupVersionKind{}, nil +} + +func (d dummyClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return false, nil +} + +func (d dummyClient) Watch(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) (watch.Interface, error) { + return nil, nil +} diff --git a/pkg/client/interceptor/interceptor_suite_test.go b/pkg/client/interceptor/interceptor_suite_test.go new file mode 100644 index 0000000000..08d9fe2281 --- /dev/null +++ b/pkg/client/interceptor/interceptor_suite_test.go @@ -0,0 +1,36 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package interceptor + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestInterceptor(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Fake client Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}) diff --git a/pkg/client/interfaces.go b/pkg/client/interfaces.go index e4ca8be85c..1af1f3a368 100644 --- a/pkg/client/interfaces.go +++ b/pkg/client/interfaces.go @@ -20,22 +20,20 @@ import ( "context" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" ) // ObjectKey identifies a Kubernetes Object. type ObjectKey = types.NamespacedName -// ObjectKeyFromObject returns the ObjectKey given a runtime.Object -func ObjectKeyFromObject(obj runtime.Object) (ObjectKey, error) { - accessor, err := meta.Accessor(obj) - if err != nil { - return ObjectKey{}, err - } - return ObjectKey{Namespace: accessor.GetNamespace(), Name: accessor.GetName()}, nil +// ObjectKeyFromObject returns the ObjectKey given a runtime.Object. +func ObjectKeyFromObject(obj Object) ObjectKey { + return ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()} } // Patch is a patch that can be applied to a Kubernetes object. @@ -43,7 +41,7 @@ type Patch interface { // Type is the PatchType of the patch. Type() types.PatchType // Data is the raw data representing the patch. - Data(obj runtime.Object) ([]byte, error) + Data(obj Object) ([]byte, error) } // TODO(directxman12): is there a sane way to deal with get/delete options? 
@@ -53,48 +51,119 @@ type Reader interface { // Get retrieves an obj for the given object key from the Kubernetes Cluster. // obj must be a struct pointer so that obj can be updated with the response // returned by the Server. - Get(ctx context.Context, key ObjectKey, obj runtime.Object) error + Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error // List retrieves list of objects for a given namespace and list options. On a // successful call, Items field in the list will be populated with the // result returned from the server. - List(ctx context.Context, list runtime.Object, opts ...ListOptionFunc) error + List(ctx context.Context, list ObjectList, opts ...ListOption) error } // Writer knows how to create, delete, and update Kubernetes objects. type Writer interface { - // Create saves the object obj in the Kubernetes cluster. - Create(ctx context.Context, obj runtime.Object, opts ...CreateOptionFunc) error + // Apply applies the given apply configuration to the Kubernetes cluster. + Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error + + // Create saves the object obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. + Create(ctx context.Context, obj Object, opts ...CreateOption) error // Delete deletes the given obj from Kubernetes cluster. - Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error + Delete(ctx context.Context, obj Object, opts ...DeleteOption) error // Update updates the given obj in the Kubernetes cluster. obj must be a // struct pointer so that obj can be updated with the content returned by the Server. - Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error + Update(ctx context.Context, obj Object, opts ...UpdateOption) error // Patch patches the given obj in the Kubernetes cluster. 
obj must be a // struct pointer so that obj can be updated with the content returned by the Server. - Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error + Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error + + // DeleteAllOf deletes all objects of the given type matching the given options. + DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error } // StatusClient knows how to create a client which can update status subresource // for kubernetes objects. type StatusClient interface { - Status() StatusWriter + Status() SubResourceWriter +} + +// SubResourceClientConstructor knows how to create a client which can update subresource +// for kubernetes objects. +type SubResourceClientConstructor interface { + // SubResourceClientConstructor returns a subresource client for the named subResource. Known + // upstream subResources usages are: + // - ServiceAccount token creation: + // sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} + // token := &authenticationv1.TokenRequest{} + // c.SubResource("token").Create(ctx, sa, token) + // + // - Pod eviction creation: + // pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} + // c.SubResource("eviction").Create(ctx, pod, &policyv1.Eviction{}) + // + // - Pod binding creation: + // pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} + // binding := &corev1.Binding{Target: corev1.ObjectReference{Name: "my-node"}} + // c.SubResource("binding").Create(ctx, pod, binding) + // + // - CertificateSigningRequest approval: + // csr := &certificatesv1.CertificateSigningRequest{ + // ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}, + // Status: certificatesv1.CertificateSigningRequestStatus{ + // Conditions: []certificatesv1.CertificateSigningRequestCondition{{ + // Type: certificatesv1.CertificateApproved, + // Status:
corev1.ConditionTrue, + // }}, + // }, + // } + // c.SubResource("approval").Update(ctx, csr) + // + // - Scale retrieval: + // dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} + // scale := &autoscalingv1.Scale{} + // c.SubResource("scale").Get(ctx, dep, scale) + // + // - Scale update: + // dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}} + // scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: 2}} + // c.SubResource("scale").Update(ctx, dep, client.WithSubResourceBody(scale)) + SubResource(subResource string) SubResourceClient +} + +// StatusWriter is kept for backward compatibility. +type StatusWriter = SubResourceWriter + +// SubResourceReader knows how to read SubResources +type SubResourceReader interface { + Get(ctx context.Context, obj Object, subResource Object, opts ...SubResourceGetOption) error } -// StatusWriter knows how to update status subresource of a Kubernetes object. -type StatusWriter interface { +// SubResourceWriter knows how to update subresource of a Kubernetes object. +type SubResourceWriter interface { + // Create saves the subResource object in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. + Create(ctx context.Context, obj Object, subResource Object, opts ...SubResourceCreateOption) error + // Update updates the fields corresponding to the status subresource for the // given obj. obj must be a struct pointer so that obj can be updated // with the content returned by the Server. - Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error + Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error // Patch patches the given object's subresource. obj must be a struct // pointer so that obj can be updated with the content returned by the // Server. 
- Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error + Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error + + // Apply applies the given apply configurations subresource. + Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...SubResourceApplyOption) error +} + +// SubResourceClient knows how to perform CRU operations on Kubernetes objects. +type SubResourceClient interface { + SubResourceReader + SubResourceWriter } // Client knows how to perform CRUD operations on Kubernetes objects. @@ -102,24 +171,42 @@ type Client interface { Reader Writer StatusClient + SubResourceClientConstructor + + // Scheme returns the scheme this client is using. + Scheme() *runtime.Scheme + // RESTMapper returns the rest this client is using. + RESTMapper() meta.RESTMapper + // GroupVersionKindFor returns the GroupVersionKind for the given object. + GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) + // IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. + IsObjectNamespaced(obj runtime.Object) (bool, error) +} + +// WithWatch supports Watch on top of the CRUD operations supported by +// the normal Client. Its intended use-case are CLI apps that need to wait for +// events. +type WithWatch interface { + Client + Watch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) } // IndexerFunc knows how to take an object and turn it into a series // of non-namespaced keys. Namespaced objects are automatically given // namespaced and non-spaced variants, so keys do not need to include namespace. -type IndexerFunc func(runtime.Object) []string +type IndexerFunc func(Object) []string // FieldIndexer knows how to index over a particular "field" such that it // can later be used by a field selector. 
type FieldIndexer interface { - // IndexFields adds an index with the given field name on the given object type + // IndexField adds an index with the given field name on the given object type // by using the given function to extract the value for that field. If you want // compatibility with the Kubernetes API server, only return one key, and only use // fields that the API server supports. Otherwise, you can return multiple keys, // and "equality" in the field selector means that at least one key matches the value. // The FieldIndexer will automatically take care of indexing over namespace // and supporting efficient all-namespace queries. - IndexField(obj runtime.Object, field string, extractValue IndexerFunc) error + IndexField(ctx context.Context, obj Object, field string, extractValue IndexerFunc) error } // IgnoreNotFound returns nil on NotFound errors. @@ -130,3 +217,13 @@ func IgnoreNotFound(err error) error { } return err } + +// IgnoreAlreadyExists returns nil on AlreadyExists errors. +// All other values that are not AlreadyExists errors or nil are returned unmodified. +func IgnoreAlreadyExists(err error) error { + if apierrors.IsAlreadyExists(err) { + return nil + } + + return err +} diff --git a/pkg/client/metadata_client.go b/pkg/client/metadata_client.go new file mode 100644 index 0000000000..d0c6b8e13a --- /dev/null +++ b/pkg/client/metadata_client.go @@ -0,0 +1,204 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/metadata" +) + +// TODO(directxman12): we could rewrite this on top of the low-level REST +// client to avoid the extra shallow copy at the end, but I'm not sure it's +// worth it -- the metadata client deals with falling back to loading the whole +// object on older API servers, etc, and we'd have to reproduce that. + +// metadataClient is a client that reads & writes metadata-only requests to/from the API server. +type metadataClient struct { + client metadata.Interface + restMapper meta.RESTMapper +} + +func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (metadata.ResourceInterface, error) { + mapping, err := mc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + if mapping.Scope.Name() == meta.RESTScopeNameRoot { + return mc.client.Resource(mapping.Resource), nil + } + return mc.client.Resource(mapping.Resource).Namespace(ns), nil +} + +// Delete implements client.Client. +func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), metadata.Namespace) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions()) +} + +// DeleteAllOf implements client.Client. 
+func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), deleteAllOfOpts.ListOptions.Namespace) + if err != nil { + return err + } + + return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions()) +} + +// Patch implements client.Client. +func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + patchOpts.ApplyOptions(opts) + + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// Get implements client.Client. 
+func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + + getOpts := GetOptions{} + getOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(gvk, key.Namespace) + if err != nil { + return err + } + + res, err := resInt.Get(ctx, key.Name, *getOpts.AsGetOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// List implements client.Client. +func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadataList) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(gvk, listOpts.Namespace) + if err != nil { + return err + } + + res, err := resInt.List(ctx, *listOpts.AsListOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +func (mc *metadataClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + patchOpts := &SubResourcePatchOptions{} + patchOpts.ApplyOptions(opts) + + body := obj + if patchOpts.SubResourceBody != nil { + body = 
patchOpts.SubResourceBody + } + + data, err := patch.Data(body) + if err != nil { + return err + } + + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), subResource) + if err != nil { + return err + } + + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} diff --git a/pkg/client/namespaced_client.go b/pkg/client/namespaced_client.go new file mode 100644 index 0000000000..445e91b98b --- /dev/null +++ b/pkg/client/namespaced_client.go @@ -0,0 +1,328 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// NewNamespacedClient wraps an existing client enforcing the namespace value. +// All functions using this client will have the same namespace declared here. +func NewNamespacedClient(c Client, ns string) Client { + return &namespacedClient{ + client: c, + namespace: ns, + } +} + +var _ Client = &namespacedClient{} + +// namespacedClient is a Client that wraps another Client in order to enforce the specified namespace value. +type namespacedClient struct { + namespace string + client Client +} + +// Scheme returns the scheme this client is using. 
+func (n *namespacedClient) Scheme() *runtime.Scheme { + return n.client.Scheme() +} + +// RESTMapper returns the scheme this client is using. +func (n *namespacedClient) RESTMapper() meta.RESTMapper { + return n.client.RESTMapper() +} + +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (n *namespacedClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return n.client.GroupVersionKindFor(obj) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (n *namespacedClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return n.client.IsObjectNamespaced(obj) +} + +// Create implements client.Client. +func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + isNamespaceScoped, err := n.IsObjectNamespaced(obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Create(ctx, obj, opts...) +} + +// Update implements client.Client. 
+func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + isNamespaceScoped, err := n.IsObjectNamespaced(obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Update(ctx, obj, opts...) +} + +// Delete implements client.Client. +func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + isNamespaceScoped, err := n.IsObjectNamespaced(obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Delete(ctx, obj, opts...) +} + +// DeleteAllOf implements client.Client. +func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + isNamespaceScoped, err := n.IsObjectNamespaced(obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + if isNamespaceScoped { + opts = append(opts, InNamespace(n.namespace)) + } + return n.client.DeleteAllOf(ctx, obj, opts...) +} + +// Patch implements client.Client. 
+func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + isNamespaceScoped, err := n.IsObjectNamespaced(obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Patch(ctx, obj, patch, opts...) +} + +func (n *namespacedClient) setNamespaceForApplyConfigIfNamespaceScoped(obj runtime.ApplyConfiguration) error { + var gvk schema.GroupVersionKind + switch o := obj.(type) { + case applyConfiguration: + var err error + gvk, err = gvkFromApplyConfiguration(o) + if err != nil { + return err + } + case *unstructuredApplyConfiguration: + gvk = o.GroupVersionKind() + default: + return fmt.Errorf("object %T is not a valid apply configuration", obj) + } + isNamespaceScoped, err := apiutil.IsGVKNamespaced(gvk, n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + if isNamespaceScoped { + switch o := obj.(type) { + case applyConfiguration: + if o.GetNamespace() != nil && *o.GetNamespace() != "" && *o.GetNamespace() != n.namespace { + return fmt.Errorf("namespace %s provided for the object %s does not match the namespace %s on the client", + *o.GetNamespace(), ptr.Deref(o.GetName(), ""), n.namespace) + } + v := reflect.ValueOf(o) + withNamespace := v.MethodByName("WithNamespace") + if !withNamespace.IsValid() { + return fmt.Errorf("ApplyConfiguration %T does not have a WithNamespace method", o) + } + if tp := withNamespace.Type(); tp.NumIn() != 1 || tp.In(0).Kind() != reflect.String { + return fmt.Errorf("WithNamespace method of ApplyConfiguration %T must take a single string 
argument", o) + } + withNamespace.Call([]reflect.Value{reflect.ValueOf(n.namespace)}) + case *unstructuredApplyConfiguration: + if o.GetNamespace() != "" && o.GetNamespace() != n.namespace { + return fmt.Errorf("namespace %s provided for the object %s does not match the namespace %s on the client", + o.GetNamespace(), o.GetName(), n.namespace) + } + o.SetNamespace(n.namespace) + } + } + + return nil +} + +func (n *namespacedClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error { + if err := n.setNamespaceForApplyConfigIfNamespaceScoped(obj); err != nil { + return err + } + + return n.client.Apply(ctx, obj, opts...) +} + +// Get implements client.Client. +func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + isNamespaceScoped, err := n.IsObjectNamespaced(obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + if isNamespaceScoped { + if key.Namespace != "" && key.Namespace != n.namespace { + return fmt.Errorf("namespace %s provided for the object %s does not match the namespace %s on the client", key.Namespace, obj.GetName(), n.namespace) + } + key.Namespace = n.namespace + } + return n.client.Get(ctx, key, obj, opts...) +} + +// List implements client.Client. +func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + if n.namespace != "" { + opts = append(opts, InNamespace(n.namespace)) + } + return n.client.List(ctx, obj, opts...) +} + +// Status implements client.StatusClient. +func (n *namespacedClient) Status() SubResourceWriter { + return n.SubResource("status") +} + +// SubResource implements client.SubResourceClient. 
+func (n *namespacedClient) SubResource(subResource string) SubResourceClient { + return &namespacedClientSubResourceClient{ + client: n.client.SubResource(subResource), + namespacedclient: n, + } +} + +// ensure namespacedClientSubResourceClient implements client.SubResourceClient. +var _ SubResourceClient = &namespacedClientSubResourceClient{} + +type namespacedClientSubResourceClient struct { + client SubResourceClient + namespacedclient *namespacedClient +} + +func (nsw *namespacedClientSubResourceClient) Get(ctx context.Context, obj, subResource Object, opts ...SubResourceGetOption) error { + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespacedclient.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespacedclient.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespacedclient.namespace) + } + + return nsw.client.Get(ctx, obj, subResource, opts...) +} + +func (nsw *namespacedClientSubResourceClient) Create(ctx context.Context, obj, subResource Object, opts ...SubResourceCreateOption) error { + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespacedclient.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespacedclient.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespacedclient.namespace) + } + + return nsw.client.Create(ctx, obj, subResource, opts...) 
+} + +// Update implements client.SubResourceWriter. +func (nsw *namespacedClientSubResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespacedclient.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespacedclient.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespacedclient.namespace) + } + return nsw.client.Update(ctx, obj, opts...) +} + +// Patch implements client.SubResourceWriter. +func (nsw *namespacedClientSubResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespacedclient.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespacedclient.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespacedclient.namespace) + } + return nsw.client.Patch(ctx, obj, patch, opts...) +} + +func (nsw *namespacedClientSubResourceClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...SubResourceApplyOption) error { + if err := nsw.namespacedclient.setNamespaceForApplyConfigIfNamespaceScoped(obj); err != nil { + return err + } + return nsw.client.Apply(ctx, obj, opts...) 
+} diff --git a/pkg/client/namespaced_client_test.go b/pkg/client/namespaced_client_test.go new file mode 100644 index 0000000000..deae881d4a --- /dev/null +++ b/pkg/client/namespaced_client_test.go @@ -0,0 +1,707 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client_test + +import ( + "context" + "encoding/json" + "fmt" + "sync/atomic" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + appsv1applyconfigurations "k8s.io/client-go/applyconfigurations/apps/v1" + corev1applyconfigurations "k8s.io/client-go/applyconfigurations/core/v1" + metav1applyconfigurations "k8s.io/client-go/applyconfigurations/meta/v1" + rbacv1applyconfigurations "k8s.io/client-go/applyconfigurations/rbac/v1" + "k8s.io/utils/ptr" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("NamespacedClient", func() { + var dep *appsv1.Deployment + var acDep *appsv1applyconfigurations.DeploymentApplyConfiguration + var ns = "default" + var count uint64 = 0 + var replicaCount int32 = 2 + + getClient := func() client.Client { + var sch = runtime.NewScheme() + + err := rbacv1.AddToScheme(sch) + 
Expect(err).ToNot(HaveOccurred()) + err = appsv1.AddToScheme(sch) + Expect(err).ToNot(HaveOccurred()) + + nonNamespacedClient, err := client.New(cfg, client.Options{Scheme: sch}) + Expect(err).NotTo(HaveOccurred()) + Expect(nonNamespacedClient).NotTo(BeNil()) + return client.NewNamespacedClient(nonNamespacedClient, ns) + } + + BeforeEach(func() { + atomic.AddUint64(&count, 1) + dep = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("namespaced-deployment-%v", count), + Labels: map[string]string{"name": fmt.Sprintf("namespaced-deployment-%v", count)}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + acDep = appsv1applyconfigurations.Deployment(dep.Name, ""). + WithLabels(dep.Labels). + WithSpec(appsv1applyconfigurations.DeploymentSpec(). + WithReplicas(*dep.Spec.Replicas). + WithSelector(metav1applyconfigurations.LabelSelector().WithMatchLabels(dep.Spec.Selector.MatchLabels)). + WithTemplate(corev1applyconfigurations.PodTemplateSpec(). + WithLabels(dep.Spec.Template.Labels). + WithSpec(corev1applyconfigurations.PodSpec(). + WithContainers(corev1applyconfigurations.Container(). + WithName(dep.Spec.Template.Spec.Containers[0].Name). 
+ WithImage(dep.Spec.Template.Spec.Containers[0].Image), + ), + ), + ), + ) + + }) + + Describe("Get", func() { + BeforeEach(func(ctx SpecContext) { + var err error + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func(ctx SpecContext) { + deleteDeployment(ctx, dep, ns) + }) + + It("should successfully Get a namespace-scoped object", func(ctx SpecContext) { + name := types.NamespacedName{Name: dep.Name} + result := &appsv1.Deployment{} + + Expect(getClient().Get(ctx, name, result)).NotTo(HaveOccurred()) + Expect(result).To(BeEquivalentTo(dep)) + }) + + It("should error when namespace provided in the object is different than the one "+ + "specified in client", func(ctx SpecContext) { + name := types.NamespacedName{Name: dep.Name, Namespace: "non-default"} + result := &appsv1.Deployment{} + + Expect(getClient().Get(ctx, name, result)).To(HaveOccurred()) + }) + }) + + Describe("List", func() { + BeforeEach(func(ctx SpecContext) { + var err error + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func(ctx SpecContext) { + deleteDeployment(ctx, dep, ns) + }) + + It("should successfully List objects when namespace is not specified with the object", func(ctx SpecContext) { + result := &appsv1.DeploymentList{} + opts := client.MatchingLabels(dep.Labels) + + Expect(getClient().List(ctx, result, opts)).NotTo(HaveOccurred()) + Expect(len(result.Items)).To(BeEquivalentTo(1)) + Expect(result.Items[0]).To(BeEquivalentTo(*dep)) + }) + + It("should List objects from the namespace specified in the client", func(ctx SpecContext) { + result := &appsv1.DeploymentList{} + opts := client.InNamespace("non-default") + + Expect(getClient().List(ctx, result, opts)).NotTo(HaveOccurred()) + Expect(len(result.Items)).To(BeEquivalentTo(1)) + Expect(result.Items[0]).To(BeEquivalentTo(*dep)) + }) + }) + + 
Describe("Apply", func() { + AfterEach(func(ctx SpecContext) { + deleteDeployment(ctx, dep, ns) + }) + + It("should successfully apply an object in the right namespace", func(ctx SpecContext) { + err := getClient().Apply(ctx, acDep, client.FieldOwner("test")) + Expect(err).NotTo(HaveOccurred()) + + res, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(res.GetNamespace()).To(BeEquivalentTo(ns)) + }) + + It("should successfully apply an object in the right namespace through unstructured", func(ctx SpecContext) { + serialized, err := json.Marshal(acDep) + Expect(err).NotTo(HaveOccurred()) + u := &unstructured.Unstructured{} + Expect(json.Unmarshal(serialized, &u.Object)).To(Succeed()) + err = getClient().Apply(ctx, client.ApplyConfigurationFromUnstructured(u), client.FieldOwner("test")) + Expect(err).NotTo(HaveOccurred()) + + res, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(res.GetNamespace()).To(BeEquivalentTo(ns)) + }) + + It("should not create an object if the namespace of the object is different", func(ctx SpecContext) { + acDep.WithNamespace("non-default") + err := getClient().Apply(ctx, acDep, client.FieldOwner("test")) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("does not match the namespace")) + }) + + It("should not create an object through unstructured if the namespace of the object is different", func(ctx SpecContext) { + acDep.WithNamespace("non-default") + serialized, err := json.Marshal(acDep) + Expect(err).NotTo(HaveOccurred()) + u := &unstructured.Unstructured{} + Expect(json.Unmarshal(serialized, &u.Object)).To(Succeed()) + err = getClient().Apply(ctx, client.ApplyConfigurationFromUnstructured(u), client.FieldOwner("test")) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("does not match the namespace")) + }) + + It("should create a 
cluster scoped object", func(ctx SpecContext) { + cr := rbacv1applyconfigurations.ClusterRole(fmt.Sprintf("clusterRole-%v", count)) + + err := getClient().Apply(ctx, cr, client.FieldOwner("test")) + Expect(err).NotTo(HaveOccurred()) + + By("checking if the object was created") + res, err := clientset.RbacV1().ClusterRoles().Get(ctx, *cr.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + deleteClusterRole(ctx, &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: *cr.Name}}) + }) + }) + + Describe("Create", func() { + AfterEach(func(ctx SpecContext) { + deleteDeployment(ctx, dep, ns) + }) + + It("should successfully create object in the right namespace", func(ctx SpecContext) { + By("creating the object initially") + err := getClient().Create(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + + By("checking if the object was created in the right namespace") + res, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(res.GetNamespace()).To(BeEquivalentTo(ns)) + }) + + It("should not create object if the namespace of the object is different", func(ctx SpecContext) { + By("creating the object initially") + dep.SetNamespace("non-default") + err := getClient().Create(ctx, dep) + Expect(err).To(HaveOccurred()) + }) + It("should create a cluster scoped object", func(ctx SpecContext) { + cr := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("clusterRole-%v", count), + Labels: map[string]string{"name": fmt.Sprintf("clusterRole-%v", count)}, + }, + } + cr.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + }) + + By("creating the object initially") + err := getClient().Create(ctx, cr) + Expect(err).NotTo(HaveOccurred()) + + By("checking if the object was created") + res, err := clientset.RbacV1().ClusterRoles().Get(ctx, cr.Name, metav1.GetOptions{}) + 
Expect(err).NotTo(HaveOccurred()) + Expect(res).NotTo(BeNil()) + + // Delete the clusterRole Resource + deleteClusterRole(ctx, cr) + }) + }) + + Describe("Update", func() { + var err error + BeforeEach(func(ctx SpecContext) { + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + dep.Annotations = map[string]string{"foo": "bar"} + Expect(err).NotTo(HaveOccurred()) + }) + AfterEach(func(ctx SpecContext) { + deleteDeployment(ctx, dep, ns) + }) + + It("should successfully update the provided object", func(ctx SpecContext) { + By("updating the Deployment") + err = getClient().Update(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating if the updated Deployment has new annotation") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.GetNamespace()).To(Equal(ns)) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + }) + + It("should successfully update the provided object when namespace is not provided", func(ctx SpecContext) { + By("updating the Deployment") + dep.SetNamespace("") + err = getClient().Update(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating if the updated Deployment has new annotation") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.GetNamespace()).To(Equal(ns)) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + }) + + It("should not update when object namespace is different", func(ctx SpecContext) { + By("updating the Deployment") + dep.SetNamespace("non-default") + err = getClient().Update(ctx, dep) + Expect(err).To(HaveOccurred()) + }) + + It("should not update any object from other namespace", func(ctx SpecContext) { + By("creating a new namespace") + tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "non-default-1"}} + _, 
err := clientset.CoreV1().Namespaces().Create(ctx, tns, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + changedDep := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "changed-dep", + Namespace: tns.Name, + Labels: map[string]string{"name": "changed-dep"}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + changedDep.Annotations = map[string]string{"foo": "bar"} + + By("creating the object initially") + _, err = clientset.AppsV1().Deployments(tns.Name).Create(ctx, changedDep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the object") + err = getClient().Update(ctx, changedDep) + Expect(err).To(HaveOccurred()) + + deleteDeployment(ctx, changedDep, tns.Name) + deleteNamespace(ctx, tns) + }) + + It("should update a cluster scoped resource", func(ctx SpecContext) { + changedCR := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("clusterRole-%v", count), + Labels: map[string]string{"name": fmt.Sprintf("clusterRole-%v", count)}, + }, + } + + changedCR.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + }) + + By("Setting annotations and creating the resource") + changedCR.Annotations = map[string]string{"foo": "bar"} + changedCR, err = clientset.RbacV1().ClusterRoles().Create(ctx, changedCR, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("updating the deployment") + err = getClient().Update(ctx, changedCR) + + By("validating if the cluster role was update") + actual, err := clientset.RbacV1().ClusterRoles().Get(ctx, changedCR.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) 
+ Expect(actual).NotTo(BeNil()) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + + // delete cluster role resource + deleteClusterRole(ctx, changedCR) + }) + + }) + + Describe("Patch", func() { + var err error + BeforeEach(func(ctx SpecContext) { + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func(ctx SpecContext) { + deleteDeployment(ctx, dep, ns) + }) + + It("should successfully modify the object using Patch", func(ctx SpecContext) { + By("Applying Patch") + err = getClient().Patch(ctx, dep, client.RawPatch(types.MergePatchType, generatePatch())) + Expect(err).NotTo(HaveOccurred()) + + By("validating patched Deployment has new annotations") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + Expect(actual.GetNamespace()).To(Equal(ns)) + }) + + It("should successfully modify the object using Patch when namespace is not provided", func(ctx SpecContext) { + By("Applying Patch") + dep.SetNamespace("") + err = getClient().Patch(ctx, dep, client.RawPatch(types.MergePatchType, generatePatch())) + Expect(err).NotTo(HaveOccurred()) + + By("validating patched Deployment has new annotations") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual.Annotations["foo"]).To(Equal("bar")) + Expect(actual.GetNamespace()).To(Equal(ns)) + }) + + It("should not modify the object when namespace of the object is different", func(ctx SpecContext) { + dep.SetNamespace("non-default") + err = getClient().Patch(ctx, dep, client.RawPatch(types.MergePatchType, generatePatch())) + Expect(err).To(HaveOccurred()) + }) + + It("should not modify an object from a different namespace", func(ctx SpecContext) { + By("creating a new namespace") + tns := &corev1.Namespace{ObjectMeta: 
metav1.ObjectMeta{Name: "non-default-2"}} + _, err := clientset.CoreV1().Namespaces().Create(ctx, tns, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + changedDep := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "changed-dep", + Namespace: tns.Name, + Labels: map[string]string{"name": "changed-dep"}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + + By("creating the object initially") + changedDep, err = clientset.AppsV1().Deployments(tns.Name).Create(ctx, changedDep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + err = getClient().Patch(ctx, changedDep, client.RawPatch(types.MergePatchType, generatePatch())) + Expect(err).To(HaveOccurred()) + + deleteDeployment(ctx, changedDep, tns.Name) + deleteNamespace(ctx, tns) + }) + + It("should successfully modify cluster scoped resource", func(ctx SpecContext) { + cr := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("clusterRole-%v", count), + Labels: map[string]string{"name": fmt.Sprintf("clusterRole-%v", count)}, + }, + } + + cr.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + }) + + By("creating the resource") + cr, err = clientset.RbacV1().ClusterRoles().Create(ctx, cr, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + + By("Applying Patch") + err = getClient().Patch(ctx, cr, client.RawPatch(types.MergePatchType, generatePatch())) + Expect(err).NotTo(HaveOccurred()) + + By("Validating the patch") + actual, err := clientset.RbacV1().ClusterRoles().Get(ctx, cr.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + 
Expect(actual.Annotations["foo"]).To(Equal("bar")) + + // delete the resource + deleteClusterRole(ctx, cr) + }) + }) + + Describe("Delete and DeleteAllOf", func() { + var err error + BeforeEach(func(ctx SpecContext) { + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func(ctx SpecContext) { + deleteDeployment(ctx, dep, ns) + }) + It("should successfully delete an object when namespace is not specified", func(ctx SpecContext) { + By("deleting the object") + dep.SetNamespace("") + err = getClient().Delete(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + + It("should successfully delete all of the deployments in the given namespace", func(ctx SpecContext) { + By("Deleting all objects in the namespace") + err = getClient().DeleteAllOf(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + }) + + It("should not delete deployments in other namespaces", func(ctx SpecContext) { + tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "non-default-3"}} + _, err = clientset.CoreV1().Namespaces().Create(ctx, tns, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + changedDep := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "changed-dep", + Namespace: tns.Name, + Labels: map[string]string{"name": "changed-dep"}, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: 
[]corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + + By("creating the object initially in other namespace") + changedDep, err = clientset.AppsV1().Deployments(tns.Name).Create(ctx, changedDep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + err = getClient().DeleteAllOf(ctx, dep) + Expect(err).NotTo(HaveOccurred()) + + By("validating the Deployment exists") + actual, err := clientset.AppsV1().Deployments(tns.Name).Get(ctx, changedDep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).To(BeEquivalentTo(changedDep)) + + deleteDeployment(ctx, changedDep, tns.Name) + deleteNamespace(ctx, tns) + }) + }) + + Describe("SubResourceWriter", func() { + var err error + BeforeEach(func(ctx SpecContext) { + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func(ctx SpecContext) { + deleteDeployment(ctx, dep, ns) + }) + + It("should change objects via update status", func(ctx SpecContext) { + changedDep := dep.DeepCopy() + changedDep.Status.Replicas = 99 + + Expect(getClient().SubResource("status").Update(ctx, changedDep)).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.GetNamespace()).To(BeEquivalentTo(ns)) + Expect(actual.Status.Replicas).To(BeEquivalentTo(99)) + }) + + It("should not change objects via update status when object namespace is different", func(ctx SpecContext) { + changedDep := dep.DeepCopy() + changedDep.SetNamespace("test") + changedDep.Status.Replicas = 99 + + Expect(getClient().SubResource("status").Update(ctx, changedDep)).To(HaveOccurred()) + }) + + It("should change objects via status patch", func(ctx SpecContext) { + changedDep := dep.DeepCopy() + changedDep.Status.Replicas = 99 + + Expect(getClient().SubResource("status").Patch(ctx, changedDep, 
client.MergeFrom(dep))).NotTo(HaveOccurred()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.GetNamespace()).To(BeEquivalentTo(ns)) + Expect(actual.Status.Replicas).To(BeEquivalentTo(99)) + }) + + It("should not change objects via status patch when object namespace is different", func(ctx SpecContext) { + changedDep := dep.DeepCopy() + changedDep.Status.Replicas = 99 + changedDep.SetNamespace("test") + + Expect(getClient().SubResource("status").Patch(ctx, changedDep, client.MergeFrom(dep))).To(HaveOccurred()) + }) + + It("should change objects via status apply", func(ctx SpecContext) { + deploymentAC, err := appsv1applyconfigurations.ExtractDeployment(dep, "test-owner") + Expect(err).NotTo(HaveOccurred()) + deploymentAC.WithStatus(&appsv1applyconfigurations.DeploymentStatusApplyConfiguration{ + Replicas: ptr.To(int32(99)), + }) + + Expect(getClient().SubResource("status").Apply(ctx, deploymentAC, client.FieldOwner("test-owner"))).To(Succeed()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.GetNamespace()).To(BeEquivalentTo(ns)) + Expect(actual.Status.Replicas).To(BeEquivalentTo(99)) + }) + + It("should set namespace on ApplyConfiguration when applying via SubResource", func(ctx SpecContext) { + deploymentAC := appsv1applyconfigurations.Deployment(dep.Name, "") + deploymentAC.WithStatus(&appsv1applyconfigurations.DeploymentStatusApplyConfiguration{ + Replicas: ptr.To(int32(50)), + }) + + Expect(getClient().SubResource("status").Apply(ctx, deploymentAC, client.FieldOwner("test-owner"))).To(Succeed()) + + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + 
Expect(actual.GetNamespace()).To(BeEquivalentTo(ns)) + Expect(actual.Status.Replicas).To(BeEquivalentTo(50)) + }) + + It("should fail when applying via SubResource with conflicting namespace", func(ctx SpecContext) { + deploymentAC := appsv1applyconfigurations.Deployment(dep.Name, "different-namespace") + deploymentAC.WithStatus(&appsv1applyconfigurations.DeploymentStatusApplyConfiguration{ + Replicas: ptr.To(int32(25)), + }) + + err := getClient().SubResource("status").Apply(ctx, deploymentAC, client.FieldOwner("test-owner")) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("namespace")) + }) + }) + + Describe("Test on invalid objects", func() { + It("should refuse to perform operations on invalid object", func(ctx SpecContext) { + err := getClient().Create(ctx, nil) + Expect(err).To(HaveOccurred()) + + err = getClient().List(ctx, nil) + Expect(err).To(HaveOccurred()) + + err = getClient().Patch(ctx, nil, client.MergeFrom(dep)) + Expect(err).To(HaveOccurred()) + + err = getClient().Update(ctx, nil) + Expect(err).To(HaveOccurred()) + + err = getClient().Delete(ctx, nil) + Expect(err).To(HaveOccurred()) + + err = getClient().Status().Patch(ctx, nil, client.MergeFrom(dep)) + Expect(err).To(HaveOccurred()) + + err = getClient().Status().Update(ctx, nil) + Expect(err).To(HaveOccurred()) + + }) + + }) +}) + +func generatePatch() []byte { + mergePatch, err := json.Marshal(map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + "foo": "bar", + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + return mergePatch +} + +func deleteClusterRole(ctx context.Context, cr *rbacv1.ClusterRole) { + _, err := clientset.RbacV1().ClusterRoles().Get(ctx, cr.Name, metav1.GetOptions{}) + if err == nil { + err = clientset.RbacV1().ClusterRoles().Delete(ctx, cr.Name, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + } +} diff --git a/pkg/client/object.go b/pkg/client/object.go new file mode 100644 index 
0000000000..31e334d6c2 --- /dev/null +++ b/pkg/client/object.go @@ -0,0 +1,77 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Object is a Kubernetes object, allows functions to work indistinctly with +// any resource that implements both Object interfaces. +// +// Semantically, these are objects which are both serializable (runtime.Object) +// and identifiable (metav1.Object) -- think any object which you could write +// as YAML or JSON, and then `kubectl create`. +// +// Code-wise, this means that any object which embeds both ObjectMeta (which +// provides metav1.Object) and TypeMeta (which provides half of runtime.Object) +// and has a `DeepCopyObject` implementation (the other half of runtime.Object) +// will implement this by default. +// +// For example, nearly all the built-in types are Objects, as well as all +// KubeBuilder-generated CRDs (unless you do something real funky to them). +// +// By and large, most things that implement runtime.Object also implement +// Object -- it's very rare to have *just* a runtime.Object implementation (the +// cases tend to be funky built-in types like Webhook payloads that don't have +// a `metadata` field). +// +// Notice that XYZList types are distinct: they implement ObjectList instead. 
+type Object interface { + metav1.Object + runtime.Object +} + +// ObjectList is a Kubernetes object list, allows functions to work +// indistinctly with any resource that implements both runtime.Object and +// metav1.ListInterface interfaces. +// +// Semantically, this is any object which may be serialized (ObjectMeta), and +// is a kubernetes list wrapper (has items, pagination fields, etc) -- think +// the wrapper used in a response from a `kubectl list --output yaml` call. +// +// Code-wise, this means that any object which embeds both ListMeta (which +// provides metav1.ListInterface) and TypeMeta (which provides half of +// runtime.Object) and has a `DeepCopyObject` implementation (the other half of +// runtime.Object) will implement this by default. +// +// For example, nearly all the built-in XYZList types are ObjectLists, as well +// as the XYZList types for all KubeBuilder-generated CRDs (unless you do +// something real funky to them). +// +// By and large, most things that are XYZList and implement runtime.Object also +// implement ObjectList -- it's very rare to have *just* a runtime.Object +// implementation (the cases tend to be funky built-in types like Webhook +// payloads that don't have a `metadata` field). +// +// This is similar to Object, which is almost always implemented by the items +// in the list themselves. +type ObjectList interface { + metav1.ListInterface + runtime.Object +} diff --git a/pkg/client/options.go b/pkg/client/options.go index 46ddf66f97..a6b921171a 100644 --- a/pkg/client/options.go +++ b/pkg/client/options.go @@ -20,8 +20,224 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/utils/ptr" ) + +// {{{ "Functional" Option Interfaces + +// CreateOption is some configuration that modifies options for a create request. 
+type CreateOption interface { + // ApplyToCreate applies this configuration to the given create options. + ApplyToCreate(*CreateOptions) +} + +// DeleteOption is some configuration that modifies options for a delete request. +type DeleteOption interface { + // ApplyToDelete applies this configuration to the given delete options. + ApplyToDelete(*DeleteOptions) +} + +// GetOption is some configuration that modifies options for a get request. +type GetOption interface { + // ApplyToGet applies this configuration to the given get options. + ApplyToGet(*GetOptions) +} + +// ListOption is some configuration that modifies options for a list request. +type ListOption interface { + // ApplyToList applies this configuration to the given list options. + ApplyToList(*ListOptions) +} + +// UpdateOption is some configuration that modifies options for an update request. +type UpdateOption interface { + // ApplyToUpdate applies this configuration to the given update options. + ApplyToUpdate(*UpdateOptions) +} + +// PatchOption is some configuration that modifies options for a patch request. +type PatchOption interface { + // ApplyToPatch applies this configuration to the given patch options. + ApplyToPatch(*PatchOptions) +} + +// ApplyOption is some configuration that modifies options for an apply request. +type ApplyOption interface { + // ApplyToApply applies this configuration to the given apply options. + ApplyToApply(*ApplyOptions) +} + +// DeleteAllOfOption is some configuration that modifies options for a delete request. +type DeleteAllOfOption interface { + // ApplyToDeleteAllOf applies this configuration to the given deletecollection options. + ApplyToDeleteAllOf(*DeleteAllOfOptions) +} + +// SubResourceGetOption modifies options for a SubResource Get request. +type SubResourceGetOption interface { + ApplyToSubResourceGet(*SubResourceGetOptions) +} + +// SubResourceUpdateOption is some configuration that modifies options for an update request. 
+type SubResourceUpdateOption interface { + // ApplyToSubResourceUpdate applies this configuration to the given update options. + ApplyToSubResourceUpdate(*SubResourceUpdateOptions) +} + +// SubResourceCreateOption is some configuration that modifies options for a create request. +type SubResourceCreateOption interface { + // ApplyToSubResourceCreate applies this configuration to the given create options. + ApplyToSubResourceCreate(*SubResourceCreateOptions) +} + +// SubResourcePatchOption configures a subresource patch request. +type SubResourcePatchOption interface { + // ApplyToSubResourcePatch applies the configuration on the given patch options. + ApplyToSubResourcePatch(*SubResourcePatchOptions) +} + +// SubResourceApplyOption configures a subresource apply request. +type SubResourceApplyOption interface { + // ApplyToSubResourceApply applies the configuration on the given patch options. + ApplyToSubResourceApply(*SubResourceApplyOptions) +} + +// }}} + +// {{{ Multi-Type Options + +// DryRunAll sets the "dry run" option to "all", executing all +// validation, etc without persisting the change to storage. +var DryRunAll = dryRunAll{} + +type dryRunAll struct{} + +// ApplyToCreate applies this configuration to the given create options. +func (dryRunAll) ApplyToCreate(opts *CreateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToUpdate applies this configuration to the given update options. +func (dryRunAll) ApplyToUpdate(opts *UpdateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToPatch applies this configuration to the given patch options. +func (dryRunAll) ApplyToPatch(opts *PatchOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToApply applies this configuration to the given apply options. +func (dryRunAll) ApplyToApply(opts *ApplyOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToDelete applies this configuration to the given delete options. 
+func (dryRunAll) ApplyToDelete(opts *DeleteOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +func (dryRunAll) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +func (dryRunAll) ApplyToSubResourceCreate(opts *SubResourceCreateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +func (dryRunAll) ApplyToSubResourceUpdate(opts *SubResourceUpdateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +func (dryRunAll) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +func (dryRunAll) ApplyToSubResourceApply(opts *SubResourceApplyOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// FieldOwner set the field manager name for the given server-side apply patch. +type FieldOwner string + +// ApplyToPatch applies this configuration to the given patch options. +func (f FieldOwner) ApplyToPatch(opts *PatchOptions) { + opts.FieldManager = string(f) +} + +// ApplyToCreate applies this configuration to the given create options. +func (f FieldOwner) ApplyToCreate(opts *CreateOptions) { + opts.FieldManager = string(f) +} + +// ApplyToUpdate applies this configuration to the given update options. +func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) { + opts.FieldManager = string(f) +} + +// ApplyToApply applies this configuration to the given apply options. +func (f FieldOwner) ApplyToApply(opts *ApplyOptions) { + opts.FieldManager = string(f) +} + +// ApplyToSubResourcePatch applies this configuration to the given patch options. +func (f FieldOwner) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) { + opts.FieldManager = string(f) +} + +// ApplyToSubResourceCreate applies this configuration to the given create options. +func (f FieldOwner) ApplyToSubResourceCreate(opts *SubResourceCreateOptions) { + opts.FieldManager = string(f) +} + +// ApplyToSubResourceUpdate applies this configuration to the given update options. 
+func (f FieldOwner) ApplyToSubResourceUpdate(opts *SubResourceUpdateOptions) { + opts.FieldManager = string(f) +} + +// ApplyToSubResourceApply applies this configuration to the given apply options. +func (f FieldOwner) ApplyToSubResourceApply(opts *SubResourceApplyOptions) { + opts.FieldManager = string(f) +} + +// FieldValidation configures field validation for the given requests. +type FieldValidation string + +// ApplyToPatch applies this configuration to the given patch options. +func (f FieldValidation) ApplyToPatch(opts *PatchOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToCreate applies this configuration to the given create options. +func (f FieldValidation) ApplyToCreate(opts *CreateOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToUpdate applies this configuration to the given update options. +func (f FieldValidation) ApplyToUpdate(opts *UpdateOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToSubResourcePatch applies this configuration to the given patch options. +func (f FieldValidation) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToSubResourceCreate applies this configuration to the given create options. +func (f FieldValidation) ApplyToSubResourceCreate(opts *SubResourceCreateOptions) { + opts.FieldValidation = string(f) +} + +// ApplyToSubResourceUpdate applies this configuration to the given update options. +func (f FieldValidation) ApplyToSubResourceUpdate(opts *SubResourceUpdateOptions) { + opts.FieldValidation = string(f) +} + +// }}} + +// {{{ Create Options + // CreateOptions contains options for create requests. It's generally a subset // of metav1.CreateOptions. type CreateOptions struct { @@ -32,6 +248,28 @@ type CreateOptions struct { // - All: all dry run stages will be processed DryRun []string + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. 
+ FieldManager string + + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. The error returned from the server + // will contain all unknown and duplicate fields encountered. + FieldValidation string + // Raw represents raw CreateOptions, as passed to the API server. Raw *metav1.CreateOptions } @@ -39,7 +277,6 @@ type CreateOptions struct { // AsCreateOptions returns these options as a metav1.CreateOptions. // This may mutate the Raw field. func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions { - if o == nil { return &metav1.CreateOptions{} } @@ -48,29 +285,42 @@ func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions { } o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + o.Raw.FieldValidation = o.FieldValidation return o.Raw } -// ApplyOptions executes the given CreateOptionFuncs and returns the mutated -// CreateOptions. -func (o *CreateOptions) ApplyOptions(optFuncs []CreateOptionFunc) *CreateOptions { - for _, optFunc := range optFuncs { - optFunc(o) +// ApplyOptions applies the given create options on these options, +// and then returns itself (for convenient chaining). 
+func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions { + for _, opt := range opts { + opt.ApplyToCreate(o) } return o } -// CreateOptionFunc is a function that mutates a CreateOptions struct. It implements -// the functional options pattern. See -// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. -type CreateOptionFunc func(*CreateOptions) - -// CreateDryRunAll is a functional option that sets the DryRun -// field of a CreateOptions struct to metav1.DryRunAll. -var CreateDryRunAll CreateOptionFunc = func(opts *CreateOptions) { - opts.DryRun = []string{metav1.DryRunAll} +// ApplyToCreate implements CreateOption. +func (o *CreateOptions) ApplyToCreate(co *CreateOptions) { + if o.DryRun != nil { + co.DryRun = o.DryRun + } + if o.FieldManager != "" { + co.FieldManager = o.FieldManager + } + if o.FieldValidation != "" { + co.FieldValidation = o.FieldValidation + } + if o.Raw != nil { + co.Raw = o.Raw + } } +var _ CreateOption = &CreateOptions{} + +// }}} + +// {{{ Delete Options + // DeleteOptions contains options for delete requests. It's generally a subset // of metav1.DeleteOptions. type DeleteOptions struct { @@ -96,12 +346,18 @@ type DeleteOptions struct { // Raw represents raw DeleteOptions, as passed to the API server. Raw *metav1.DeleteOptions + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string } // AsDeleteOptions returns these options as a metav1.DeleteOptions. // This may mutate the Raw field. 
func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions { - if o == nil { return &metav1.DeleteOptions{} } @@ -112,90 +368,208 @@ func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions { o.Raw.GracePeriodSeconds = o.GracePeriodSeconds o.Raw.Preconditions = o.Preconditions o.Raw.PropagationPolicy = o.PropagationPolicy + o.Raw.DryRun = o.DryRun return o.Raw } -// ApplyOptions executes the given DeleteOptionFuncs and returns the mutated -// DeleteOptions. -func (o *DeleteOptions) ApplyOptions(optFuncs []DeleteOptionFunc) *DeleteOptions { - for _, optFunc := range optFuncs { - optFunc(o) +// ApplyOptions applies the given delete options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions { + for _, opt := range opts { + opt.ApplyToDelete(o) } return o } -// DeleteOptionFunc is a function that mutates a DeleteOptions struct. It implements -// the functional options pattern. See -// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. -type DeleteOptionFunc func(*DeleteOptions) +var _ DeleteOption = &DeleteOptions{} + +// ApplyToDelete implements DeleteOption. +func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) { + if o.GracePeriodSeconds != nil { + do.GracePeriodSeconds = o.GracePeriodSeconds + } + if o.Preconditions != nil { + do.Preconditions = o.Preconditions + } + if o.PropagationPolicy != nil { + do.PropagationPolicy = o.PropagationPolicy + } + if o.Raw != nil { + do.Raw = o.Raw + } + if o.DryRun != nil { + do.DryRun = o.DryRun + } +} + +// GracePeriodSeconds sets the grace period for the deletion +// to the given number of seconds. +type GracePeriodSeconds int64 + +// ApplyToDelete applies this configuration to the given delete options. 
+func (s GracePeriodSeconds) ApplyToDelete(opts *DeleteOptions) { + secs := int64(s) + opts.GracePeriodSeconds = &secs +} + +// ApplyToDeleteAllOf applies this configuration to the given List options. +func (s GracePeriodSeconds) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + s.ApplyToDelete(&opts.DeleteOptions) +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions metav1.Preconditions + +// ApplyToDelete applies this configuration to the given delete options. +func (p Preconditions) ApplyToDelete(opts *DeleteOptions) { + preconds := metav1.Preconditions(p) + opts.Preconditions = &preconds +} + +// ApplyToDeleteAllOf applies this configuration to the given List options. +func (p Preconditions) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// PropagationPolicy determines whether and how garbage collection will be +// performed. Either this field or OrphanDependents may be set, but not both. +// The default policy is decided by the existing finalizer set in the +// metadata.finalizers and the resource-specific default policy. +// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - +// allow the garbage collector to delete the dependents in the background; +// 'Foreground' - a cascading policy that deletes all dependents in the +// foreground. +type PropagationPolicy metav1.DeletionPropagation + +// ApplyToDelete applies the given delete options on these options. +// It will propagate to the dependents of the object to let the garbage collector handle it. +func (p PropagationPolicy) ApplyToDelete(opts *DeleteOptions) { + policy := metav1.DeletionPropagation(p) + opts.PropagationPolicy = &policy +} + +// ApplyToDeleteAllOf applies this configuration to the given List options. 
+func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// }}} + +// {{{ Get Options + +// GetOptions contains options for get operation. +// Now it only has a Raw field, with support for specific resourceVersion. +type GetOptions struct { + // Raw represents raw GetOptions, as passed to the API server. Note + // that these may not be respected by all implementations of interface. + Raw *metav1.GetOptions -// GracePeriodSeconds is a functional option that sets the GracePeriodSeconds -// field of a DeleteOptions struct. -func GracePeriodSeconds(gp int64) DeleteOptionFunc { - return func(opts *DeleteOptions) { - opts.GracePeriodSeconds = &gp + // UnsafeDisableDeepCopy indicates not to deep copy objects during get object. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + // +optional + UnsafeDisableDeepCopy *bool +} + +var _ GetOption = &GetOptions{} + +// ApplyToGet implements GetOption for GetOptions. +func (o *GetOptions) ApplyToGet(lo *GetOptions) { + if o.Raw != nil { + lo.Raw = o.Raw + } + if o.UnsafeDisableDeepCopy != nil { + lo.UnsafeDisableDeepCopy = o.UnsafeDisableDeepCopy } } -// Preconditions is a functional option that sets the Preconditions field of a -// DeleteOptions struct. -func Preconditions(p *metav1.Preconditions) DeleteOptionFunc { - return func(opts *DeleteOptions) { - opts.Preconditions = p +// AsGetOptions returns these options as a flattened metav1.GetOptions. +// This may mutate the Raw field. +func (o *GetOptions) AsGetOptions() *metav1.GetOptions { + if o == nil || o.Raw == nil { + return &metav1.GetOptions{} } + return o.Raw } -// PropagationPolicy is a functional option that sets the PropagationPolicy -// field of a DeleteOptions struct. 
-func PropagationPolicy(p metav1.DeletionPropagation) DeleteOptionFunc { - return func(opts *DeleteOptions) { - opts.PropagationPolicy = &p +// ApplyOptions applies the given get options on these options, +// and then returns itself (for convenient chaining). +func (o *GetOptions) ApplyOptions(opts []GetOption) *GetOptions { + for _, opt := range opts { + opt.ApplyToGet(o) } + return o } +// }}} + +// {{{ List Options + // ListOptions contains options for limiting or filtering results. // It's generally a subset of metav1.ListOptions, with support for // pre-parsed selectors (since generally, selectors will be executed // against the cache). type ListOptions struct { - // LabelSelector filters results by label. Use SetLabelSelector to + // LabelSelector filters results by label. Use labels.Parse() to // set from raw string form. LabelSelector labels.Selector // FieldSelector filters results by a particular field. In order // to use this with cache-based implementations, restrict usage to - // a single field-value pair that's been added to the indexers. + // exact match field-value pair that's been added to the indexers. FieldSelector fields.Selector // Namespace represents the namespace to list for, or empty for // non-namespaced objects, or to list across all namespaces. Namespace string + // Limit specifies the maximum number of results to return from the server. The server may + // not support this field on all resource types, but if it does and more results remain it + // will set the continue field on the returned list object. This field is not supported if watch + // is true in the Raw ListOptions. + Limit int64 + // Continue is a token returned by the server that lets a client retrieve chunks of results + // from the server by specifying limit. The server may reject requests for continuation tokens + // it does not recognize and will return a 410 error if the token can no longer be used because + // it has expired. 
This field is not supported if watch is true in the Raw ListOptions. + Continue string + + // UnsafeDisableDeepCopy indicates not to deep copy objects during list objects. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + // +optional + UnsafeDisableDeepCopy *bool + // Raw represents raw ListOptions, as passed to the API server. Note // that these may not be respected by all implementations of interface, - // and the LabelSelector and FieldSelector fields are ignored. + // and the LabelSelector, FieldSelector, Limit and Continue fields are ignored. Raw *metav1.ListOptions } -// SetLabelSelector sets this the label selector of these options -// from a string form of the selector. -func (o *ListOptions) SetLabelSelector(selRaw string) error { - sel, err := labels.Parse(selRaw) - if err != nil { - return err - } - o.LabelSelector = sel - return nil -} +var _ ListOption = &ListOptions{} -// SetFieldSelector sets this the label selector of these options -// from a string form of the selector. -func (o *ListOptions) SetFieldSelector(selRaw string) error { - sel, err := fields.ParseSelector(selRaw) - if err != nil { - return err +// ApplyToList implements ListOption for ListOptions. +func (o *ListOptions) ApplyToList(lo *ListOptions) { + if o.LabelSelector != nil { + lo.LabelSelector = o.LabelSelector + } + if o.FieldSelector != nil { + lo.FieldSelector = o.FieldSelector + } + if o.Namespace != "" { + lo.Namespace = o.Namespace + } + if o.Raw != nil { + lo.Raw = o.Raw + } + if o.Limit > 0 { + lo.Limit = o.Limit + } + if o.Continue != "" { + lo.Continue = o.Continue + } + if o.UnsafeDisableDeepCopy != nil { + lo.UnsafeDisableDeepCopy = o.UnsafeDisableDeepCopy } - o.FieldSelector = sel - return nil } // AsListOptions returns these options as a flattened metav1.ListOptions. 
@@ -213,85 +587,185 @@ func (o *ListOptions) AsListOptions() *metav1.ListOptions { if o.FieldSelector != nil { o.Raw.FieldSelector = o.FieldSelector.String() } + if !o.Raw.Watch { + o.Raw.Limit = o.Limit + o.Raw.Continue = o.Continue + } return o.Raw } -// ApplyOptions executes the given ListOptionFuncs and returns the mutated -// ListOptions. -func (o *ListOptions) ApplyOptions(optFuncs []ListOptionFunc) *ListOptions { - for _, optFunc := range optFuncs { - optFunc(o) +// ApplyOptions applies the given list options on these options, +// and then returns itself (for convenient chaining). +func (o *ListOptions) ApplyOptions(opts []ListOption) *ListOptions { + for _, opt := range opts { + opt.ApplyToList(o) } return o } -// ListOptionFunc is a function that mutates a ListOptions struct. It implements -// the functional options pattern. See -// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. -type ListOptionFunc func(*ListOptions) +// MatchingLabels filters the list/delete operation on the given set of labels. +type MatchingLabels map[string]string -// MatchingLabels is a convenience function that sets the label selector -// to match the given labels, and then returns the options. -// It mutates the list options. -func (o *ListOptions) MatchingLabels(lbls map[string]string) *ListOptions { - sel := labels.SelectorFromSet(lbls) - o.LabelSelector = sel - return o +// ApplyToList applies this configuration to the given list options. +func (m MatchingLabels) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid reserializing this over and over? + if opts.LabelSelector == nil { + opts.LabelSelector = labels.SelectorFromValidatedSet(map[string]string(m)) + return + } + // If there's already a selector, we need to AND the two together. 
+ noValidSel := labels.SelectorFromValidatedSet(map[string]string(m)) + reqs, _ := noValidSel.Requirements() + for _, req := range reqs { + opts.LabelSelector = opts.LabelSelector.Add(req) + } } -// MatchingField is a convenience function that sets the field selector -// to match the given field, and then returns the options. -// It mutates the list options. -func (o *ListOptions) MatchingField(name, val string) *ListOptions { - sel := fields.SelectorFromSet(fields.Set{name: val}) - o.FieldSelector = sel - return o +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) } -// InNamespace is a convenience function that sets the namespace, -// and then returns the options. It mutates the list options. -func (o *ListOptions) InNamespace(ns string) *ListOptions { - o.Namespace = ns - return o -} +// HasLabels filters the list/delete operation checking if the set of labels exists +// without checking their values. +type HasLabels []string -// MatchingLabels is a functional option that sets the LabelSelector field of -// a ListOptions struct. -func MatchingLabels(lbls map[string]string) ListOptionFunc { - sel := labels.SelectorFromSet(lbls) - return func(opts *ListOptions) { - opts.LabelSelector = sel +// ApplyToList applies this configuration to the given list options. +func (m HasLabels) ApplyToList(opts *ListOptions) { + if opts.LabelSelector == nil { + opts.LabelSelector = labels.NewSelector() + } + // TODO: ignore invalid labels will result in an empty selector. + // This is inconsistent to the that of MatchingLabels. + for _, label := range m { + r, err := labels.NewRequirement(label, selection.Exists, nil) + if err == nil { + opts.LabelSelector = opts.LabelSelector.Add(*r) + } } } -// MatchingField is a functional option that sets the FieldSelector field of -// a ListOptions struct. 
-func MatchingField(name, val string) ListOptionFunc { - sel := fields.SelectorFromSet(fields.Set{name: val}) - return func(opts *ListOptions) { - opts.FieldSelector = sel - } +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m HasLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) } -// InNamespace is a functional option that sets the Namespace field of -// a ListOptions struct. -func InNamespace(ns string) ListOptionFunc { - return func(opts *ListOptions) { - opts.Namespace = ns +// MatchingLabelsSelector filters the list/delete operation on the given label +// selector (or index in the case of cached lists). A struct is used because +// labels.Selector is an interface, which cannot be aliased. +type MatchingLabelsSelector struct { + labels.Selector +} + +// ApplyToList applies this configuration to the given list options. +func (m MatchingLabelsSelector) ApplyToList(opts *ListOptions) { + if m.Selector == nil { + m.Selector = labels.Nothing() } + opts.LabelSelector = m +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingLabelsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingFields filters the list/delete operation on the given field Set +// (or index in the case of cached lists). +type MatchingFields fields.Set + +// ApplyToList applies this configuration to the given list options. +func (m MatchingFields) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid re-serializing this? + sel := fields.Set(m).AsSelector() + opts.FieldSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. 
+func (m MatchingFields) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingFieldsSelector filters the list/delete operation on the given field +// selector (or index in the case of cached lists). A struct is used because +// fields.Selector is an interface, which cannot be aliased. +type MatchingFieldsSelector struct { + fields.Selector } -// UseListOptions is a functional option that replaces the fields of a -// ListOptions struct with those of a different ListOptions struct. -// -// Example: -// cl.List(ctx, list, client.UseListOptions(lo.InNamespace(ns).MatchingLabels(labels))) -func UseListOptions(newOpts *ListOptions) ListOptionFunc { - return func(opts *ListOptions) { - *opts = *newOpts +// ApplyToList applies this configuration to the given list options. +func (m MatchingFieldsSelector) ApplyToList(opts *ListOptions) { + if m.Selector == nil { + m.Selector = fields.Nothing() } + opts.FieldSelector = m +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingFieldsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) } +// InNamespace restricts the list/delete operation to the given namespace. +type InNamespace string + +// ApplyToList applies this configuration to the given list options. +func (n InNamespace) ApplyToList(opts *ListOptions) { + opts.Namespace = string(n) +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + n.ApplyToList(&opts.ListOptions) +} + +// AsSelector returns a selector that matches objects in the given namespace. +func (n InNamespace) AsSelector() fields.Selector { + return fields.SelectorFromSet(fields.Set{"metadata.namespace": string(n)}) +} + +// Limit specifies the maximum number of results to return from the server. 
+// Limit does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. +type Limit int64 + +// ApplyToList applies this configuration to the given an list options. +func (l Limit) ApplyToList(opts *ListOptions) { + opts.Limit = int64(l) +} + +// UnsafeDisableDeepCopyOption indicates not to deep copy objects during list objects. +// Be very careful with this, when enabled you must DeepCopy any object before mutating it, +// otherwise you will mutate the object in the cache. +type UnsafeDisableDeepCopyOption bool + +// ApplyToGet applies this configuration to the given an Get options. +func (d UnsafeDisableDeepCopyOption) ApplyToGet(opts *GetOptions) { + opts.UnsafeDisableDeepCopy = ptr.To(bool(d)) +} + +// ApplyToList applies this configuration to the given an List options. +func (d UnsafeDisableDeepCopyOption) ApplyToList(opts *ListOptions) { + opts.UnsafeDisableDeepCopy = ptr.To(bool(d)) +} + +// UnsafeDisableDeepCopy indicates not to deep copy objects during list objects. +const UnsafeDisableDeepCopy = UnsafeDisableDeepCopyOption(true) + +// Continue sets a continuation token to retrieve chunks of results when using limit. +// Continue does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. +type Continue string + +// ApplyToList applies this configuration to the given an List options. +func (c Continue) ApplyToList(opts *ListOptions) { + opts.Continue = string(c) +} + +// }}} + +// {{{ Update Options + // UpdateOptions contains options for create requests. It's generally a subset // of metav1.UpdateOptions. type UpdateOptions struct { @@ -302,6 +776,28 @@ type UpdateOptions struct { // - All: all dry run stages will be processed DryRun []string + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. 
+ FieldManager string + + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. The error returned from the server + // will contain all unknown and duplicate fields encountered. + FieldValidation string + // Raw represents raw UpdateOptions, as passed to the API server. Raw *metav1.UpdateOptions } @@ -309,7 +805,6 @@ type UpdateOptions struct { // AsUpdateOptions returns these options as a metav1.UpdateOptions. // This may mutate the Raw field. func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions { - if o == nil { return &metav1.UpdateOptions{} } @@ -318,29 +813,42 @@ func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions { } o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + o.Raw.FieldValidation = o.FieldValidation return o.Raw } -// ApplyOptions executes the given UpdateOptionFuncs and returns the mutated -// UpdateOptions. -func (o *UpdateOptions) ApplyOptions(optFuncs []UpdateOptionFunc) *UpdateOptions { - for _, optFunc := range optFuncs { - optFunc(o) +// ApplyOptions applies the given update options on these options, +// and then returns itself (for convenient chaining). 
+func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions { + for _, opt := range opts { + opt.ApplyToUpdate(o) } return o } -// UpdateOptionFunc is a function that mutates a UpdateOptions struct. It implements -// the functional options pattern. See -// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. -type UpdateOptionFunc func(*UpdateOptions) +var _ UpdateOption = &UpdateOptions{} -// UpdateDryRunAll is a functional option that sets the DryRun -// field of a UpdateOptions struct to metav1.DryRunAll. -var UpdateDryRunAll UpdateOptionFunc = func(opts *UpdateOptions) { - opts.DryRun = []string{metav1.DryRunAll} +// ApplyToUpdate implements UpdateOption. +func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) { + if o.DryRun != nil { + uo.DryRun = o.DryRun + } + if o.FieldManager != "" { + uo.FieldManager = o.FieldManager + } + if o.FieldValidation != "" { + uo.FieldValidation = o.FieldValidation + } + if o.Raw != nil { + uo.Raw = o.Raw + } } +// }}} + +// {{{ Patch Options + // PatchOptions contains options for patch requests. type PatchOptions struct { // When present, indicates that modifications should not be @@ -360,15 +868,33 @@ type PatchOptions struct { // this request. It must be set with server-side apply. FieldManager string + // fieldValidation instructs the server on how to handle + // objects in the request (POST/PUT/PATCH) containing unknown + // or duplicate fields. Valid values are: + // - Ignore: This will ignore any unknown fields that are silently + // dropped from the object, and will ignore all but the last duplicate + // field that the decoder encounters. This is the default behavior + // prior to v1.23. + // - Warn: This will send a warning via the standard warning response + // header for each unknown field that is dropped from the object, and + // for each duplicate field that is encountered. 
The request will + // still succeed if there are no other errors, and will only persist + // the last of any duplicate fields. This is the default in v1.23+ + // - Strict: This will fail the request with a BadRequest error if + // any unknown fields would be dropped from the object, or if any + // duplicate fields are present. The error returned from the server + // will contain all unknown and duplicate fields encountered. + FieldValidation string + // Raw represents raw PatchOptions, as passed to the API server. Raw *metav1.PatchOptions } -// ApplyOptions executes the given PatchOptionFuncs, mutating these PatchOptions. -// It returns the mutated PatchOptions for convenience. -func (o *PatchOptions) ApplyOptions(optFuncs []PatchOptionFunc) *PatchOptions { - for _, optFunc := range optFuncs { - optFunc(o) +// ApplyOptions applies the given patch options on these options, +// and then returns itself (for convenient chaining). +func (o *PatchOptions) ApplyOptions(opts []PatchOption) *PatchOptions { + for _, opt := range opts { + opt.ApplyToPatch(o) } return o } @@ -383,35 +909,147 @@ func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { o.Raw = &metav1.PatchOptions{} } - o.Raw.DryRun = o.DryRun - o.Raw.Force = o.Force - o.Raw.FieldManager = o.FieldManager + if o.DryRun != nil { + o.Raw.DryRun = o.DryRun + } + if o.Force != nil { + o.Raw.Force = o.Force + } + if o.FieldManager != "" { + o.Raw.FieldManager = o.FieldManager + } + if o.FieldValidation != "" { + o.Raw.FieldValidation = o.FieldValidation + } return o.Raw } -// PatchOptionFunc is a function that mutates a PatchOptions struct. It implements -// the functional options pattern. See -// https://github.com/tmrts/go-patterns/blob/master/idiom/functional-options.md. 
-type PatchOptionFunc func(*PatchOptions) +var _ PatchOption = &PatchOptions{} -// ForceOwnership sets the Force option, indicating that -// in case of conflicts with server-side apply, the client should -// acquire ownership of the conflicting field. Most controllers -// should use this. -var ForceOwnership PatchOptionFunc = func(opts *PatchOptions) { - definitelyTrue := true - opts.Force = &definitelyTrue +// ApplyToPatch implements PatchOptions. +func (o *PatchOptions) ApplyToPatch(po *PatchOptions) { + if o.DryRun != nil { + po.DryRun = o.DryRun + } + if o.Force != nil { + po.Force = o.Force + } + if o.FieldManager != "" { + po.FieldManager = o.FieldManager + } + if o.FieldValidation != "" { + po.FieldValidation = o.FieldValidation + } + if o.Raw != nil { + po.Raw = o.Raw + } } -// PatchDryRunAll is a functional option that sets the DryRun -// field of a PatchOptions struct to metav1.DryRunAll. -var PatchDryRunAll PatchOptionFunc = func(opts *PatchOptions) { - opts.DryRun = []string{metav1.DryRunAll} +// ForceOwnership indicates that in case of conflicts with server-side apply, +// the client should acquire ownership of the conflicting field. Most +// controllers should use this. +var ForceOwnership = forceOwnership{} + +type forceOwnership struct{} + +func (forceOwnership) ApplyToPatch(opts *PatchOptions) { + opts.Force = ptr.To(true) } -// FieldOwner set the field manager name for the given server-side apply patch. 
-func FieldOwner(name string) PatchOptionFunc { - return func(opts *PatchOptions) { - opts.FieldManager = name +func (forceOwnership) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) { + opts.Force = ptr.To(true) +} + +func (forceOwnership) ApplyToApply(opts *ApplyOptions) { + opts.Force = ptr.To(true) +} + +func (forceOwnership) ApplyToSubResourceApply(opts *SubResourceApplyOptions) { + opts.Force = ptr.To(true) +} + +// }}} + +// {{{ DeleteAllOf Options + +// these are all just delete options and list options + +// DeleteAllOfOptions contains options for deletecollection (deleteallof) requests. +// It's just list and delete options smooshed together. +type DeleteAllOfOptions struct { + ListOptions + DeleteOptions +} + +// ApplyOptions applies the given deleteallof options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOfOptions { + for _, opt := range opts { + opt.ApplyToDeleteAllOf(o) + } + return o +} + +var _ DeleteAllOfOption = &DeleteAllOfOptions{} + +// ApplyToDeleteAllOf implements DeleteAllOfOption. +func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) { + o.ApplyToList(&do.ListOptions) + o.ApplyToDelete(&do.DeleteOptions) +} + +// }}} + +// ApplyOptions are the options for an apply request. +type ApplyOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // Force is going to "force" Apply requests. It means user will + // re-acquire conflicting fields owned by other people. + Force *bool + + // fieldManager is a name associated with the actor or entity + // that is making these changes. 
The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. This + // field is required. + // + // +required + FieldManager string +} + +// ApplyOptions applies the given opts onto the ApplyOptions +func (o *ApplyOptions) ApplyOptions(opts []ApplyOption) *ApplyOptions { + for _, opt := range opts { + opt.ApplyToApply(o) + } + return o +} + +// ApplyToApply applies the given opts onto the ApplyOptions +func (o *ApplyOptions) ApplyToApply(opts *ApplyOptions) { + if o.DryRun != nil { + opts.DryRun = o.DryRun + } + if o.Force != nil { + opts.Force = o.Force + } + + if o.FieldManager != "" { + opts.FieldManager = o.FieldManager + } +} + +// AsPatchOptions constructs patch options from the given ApplyOptions +func (o *ApplyOptions) AsPatchOptions() *metav1.PatchOptions { + return &metav1.PatchOptions{ + DryRun: o.DryRun, + Force: o.Force, + FieldManager: o.FieldManager, } } diff --git a/pkg/client/options_test.go b/pkg/client/options_test.go new file mode 100644 index 0000000000..082586bca3 --- /dev/null +++ b/pkg/client/options_test.go @@ -0,0 +1,493 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("ListOptions", func() { + It("Should set LabelSelector", func() { + labelSelector, err := labels.Parse("a=b") + Expect(err).NotTo(HaveOccurred()) + o := &client.ListOptions{LabelSelector: labelSelector} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) + It("Should set LabelSelector with MatchingLabelsSelector", func() { + labelSelector, err := labels.Parse("a=b") + Expect(err).NotTo(HaveOccurred()) + newListOpts := &client.ListOptions{} + newListOpts.ApplyOptions([]client.ListOption{client.MatchingLabelsSelector{Selector: labelSelector}}) + expectedListOpts := &client.ListOptions{LabelSelector: client.MatchingLabelsSelector{Selector: labelSelector}} + Expect(newListOpts).To(Equal(expectedListOpts)) + }) + It("Should set LabelSelector to nothing with empty MatchingLabelsSelector", func() { + newListOpts := &client.ListOptions{} + newListOpts.ApplyOptions([]client.ListOption{client.MatchingLabelsSelector{}}) + expectedListOpts := &client.ListOptions{LabelSelector: client.MatchingLabelsSelector{Selector: labels.Nothing()}} + Expect(newListOpts).To(Equal(expectedListOpts)) + }) + It("Should set FieldSelector", func() { + o := &client.ListOptions{FieldSelector: fields.Nothing()} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) + It("Should set FieldSelector with MatchingFieldsSelector", func() { + newListOpts := &client.ListOptions{} + newListOpts.ApplyOptions([]client.ListOption{client.MatchingFieldsSelector{Selector: fields.Nothing()}}) + expectedListOpts := &client.ListOptions{FieldSelector: client.MatchingFieldsSelector{Selector: fields.Nothing()}} + Expect(newListOpts).To(Equal(expectedListOpts)) + }) + 
It("Should set FieldSelector to nothing with empty MatchingFieldsSelector", func() { + newListOpts := &client.ListOptions{} + newListOpts.ApplyOptions([]client.ListOption{client.MatchingFieldsSelector{}}) + expectedListOpts := &client.ListOptions{FieldSelector: client.MatchingFieldsSelector{Selector: fields.Nothing()}} + Expect(newListOpts).To(Equal(expectedListOpts)) + }) + It("Should set Namespace", func() { + o := &client.ListOptions{Namespace: "my-ns"} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) + It("Should set Raw", func() { + o := &client.ListOptions{Raw: &metav1.ListOptions{FieldSelector: "Hans"}} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) + It("Should set Limit", func() { + o := &client.ListOptions{Limit: int64(1)} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) + It("Should set Continue", func() { + o := &client.ListOptions{Continue: "foo"} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) + It("Should set UnsafeDisableDeepCopy", func() { + definitelyTrue := true + o := &client.ListOptions{UnsafeDisableDeepCopy: &definitelyTrue} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) + It("Should set UnsafeDisableDeepCopy through option", func() { + listOpts := &client.ListOptions{} + client.UnsafeDisableDeepCopy.ApplyToList(listOpts) + Expect(listOpts.UnsafeDisableDeepCopy).ToNot(BeNil()) + Expect(*listOpts.UnsafeDisableDeepCopy).To(BeTrue()) + }) + It("Should not set anything", func() { + o := &client.ListOptions{} + newListOpts := &client.ListOptions{} + o.ApplyToList(newListOpts) + Expect(newListOpts).To(Equal(o)) + }) +}) + +var _ = Describe("GetOptions", func() { + It("Should set Raw", func() { + o := &client.GetOptions{Raw: 
&metav1.GetOptions{ResourceVersion: "RV0"}} + newGetOpts := &client.GetOptions{} + o.ApplyToGet(newGetOpts) + Expect(newGetOpts).To(Equal(o)) + }) + It("Should set UnsafeDisableDeepCopy", func() { + definitelyTrue := true + o := &client.GetOptions{UnsafeDisableDeepCopy: &definitelyTrue} + newGetOpts := &client.GetOptions{} + o.ApplyToGet(newGetOpts) + Expect(newGetOpts).To(Equal(o)) + }) + It("Should set UnsafeDisableDeepCopy through option", func() { + getOpts := &client.GetOptions{} + client.UnsafeDisableDeepCopy.ApplyToGet(getOpts) + Expect(getOpts.UnsafeDisableDeepCopy).ToNot(BeNil()) + Expect(*getOpts.UnsafeDisableDeepCopy).To(BeTrue()) + }) +}) + +var _ = Describe("ApplyOptions", func() { + It("Should set DryRun", func() { + o := &client.ApplyOptions{DryRun: []string{"Hello", "Theodore"}} + newApplyOpts := &client.ApplyOptions{} + o.ApplyToApply(newApplyOpts) + Expect(newApplyOpts).To(Equal(o)) + }) + It("Should set Force", func() { + o := &client.ApplyOptions{Force: ptr.To(true)} + newApplyOpts := &client.ApplyOptions{} + o.ApplyToApply(newApplyOpts) + Expect(newApplyOpts).To(Equal(o)) + }) + It("Should set FieldManager", func() { + o := &client.ApplyOptions{FieldManager: "field-manager"} + newApplyOpts := &client.ApplyOptions{} + o.ApplyToApply(newApplyOpts) + Expect(newApplyOpts).To(Equal(o)) + }) +}) + +var _ = Describe("CreateOptions", func() { + It("Should set DryRun", func() { + o := &client.CreateOptions{DryRun: []string{"Hello", "Theodore"}} + newCreateOpts := &client.CreateOptions{} + o.ApplyToCreate(newCreateOpts) + Expect(newCreateOpts).To(Equal(o)) + }) + It("Should set FieldManager", func() { + o := &client.CreateOptions{FieldManager: "FieldManager"} + newCreateOpts := &client.CreateOptions{} + o.ApplyToCreate(newCreateOpts) + Expect(newCreateOpts).To(Equal(o)) + }) + It("Should set Raw", func() { + o := &client.CreateOptions{Raw: &metav1.CreateOptions{DryRun: []string{"Bye", "Theodore"}}} + newCreateOpts := &client.CreateOptions{} + 
o.ApplyToCreate(newCreateOpts) + Expect(newCreateOpts).To(Equal(o)) + }) + It("Should not set anything", func() { + o := &client.CreateOptions{} + newCreateOpts := &client.CreateOptions{} + o.ApplyToCreate(newCreateOpts) + Expect(newCreateOpts).To(Equal(o)) + }) +}) + +var _ = Describe("DeleteOptions", func() { + It("Should set GracePeriodSeconds", func() { + o := &client.DeleteOptions{GracePeriodSeconds: ptr.To(int64(42))} + newDeleteOpts := &client.DeleteOptions{} + o.ApplyToDelete(newDeleteOpts) + Expect(newDeleteOpts).To(Equal(o)) + }) + It("Should set Preconditions", func() { + o := &client.DeleteOptions{Preconditions: &metav1.Preconditions{}} + newDeleteOpts := &client.DeleteOptions{} + o.ApplyToDelete(newDeleteOpts) + Expect(newDeleteOpts).To(Equal(o)) + }) + It("Should set PropagationPolicy", func() { + policy := metav1.DeletePropagationBackground + o := &client.DeleteOptions{PropagationPolicy: &policy} + newDeleteOpts := &client.DeleteOptions{} + o.ApplyToDelete(newDeleteOpts) + Expect(newDeleteOpts).To(Equal(o)) + }) + It("Should set Raw", func() { + o := &client.DeleteOptions{Raw: &metav1.DeleteOptions{}} + newDeleteOpts := &client.DeleteOptions{} + o.ApplyToDelete(newDeleteOpts) + Expect(newDeleteOpts).To(Equal(o)) + }) + It("Should set DryRun", func() { + o := &client.DeleteOptions{DryRun: []string{"Hello", "Pippa"}} + newDeleteOpts := &client.DeleteOptions{} + o.ApplyToDelete(newDeleteOpts) + Expect(newDeleteOpts).To(Equal(o)) + }) + It("Should not set anything", func() { + o := &client.DeleteOptions{} + newDeleteOpts := &client.DeleteOptions{} + o.ApplyToDelete(newDeleteOpts) + Expect(newDeleteOpts).To(Equal(o)) + }) +}) + +var _ = Describe("UpdateOptions", func() { + It("Should set DryRun", func() { + o := &client.UpdateOptions{DryRun: []string{"Bye", "Pippa"}} + newUpdateOpts := &client.UpdateOptions{} + o.ApplyToUpdate(newUpdateOpts) + Expect(newUpdateOpts).To(Equal(o)) + }) + It("Should set FieldManager", func() { + o := 
&client.UpdateOptions{FieldManager: "Hello Boris"} + newUpdateOpts := &client.UpdateOptions{} + o.ApplyToUpdate(newUpdateOpts) + Expect(newUpdateOpts).To(Equal(o)) + }) + It("Should set Raw", func() { + o := &client.UpdateOptions{Raw: &metav1.UpdateOptions{}} + newUpdateOpts := &client.UpdateOptions{} + o.ApplyToUpdate(newUpdateOpts) + Expect(newUpdateOpts).To(Equal(o)) + }) + It("Should not set anything", func() { + o := &client.UpdateOptions{} + newUpdateOpts := &client.UpdateOptions{} + o.ApplyToUpdate(newUpdateOpts) + Expect(newUpdateOpts).To(Equal(o)) + }) +}) + +var _ = Describe("PatchOptions", func() { + It("Should set DryRun", func() { + o := &client.PatchOptions{DryRun: []string{"Bye", "Boris"}} + newPatchOpts := &client.PatchOptions{} + o.ApplyToPatch(newPatchOpts) + Expect(newPatchOpts).To(Equal(o)) + }) + It("Should set Force", func() { + o := &client.PatchOptions{Force: ptr.To(true)} + newPatchOpts := &client.PatchOptions{} + o.ApplyToPatch(newPatchOpts) + Expect(newPatchOpts).To(Equal(o)) + }) + It("Should set FieldManager", func() { + o := &client.PatchOptions{FieldManager: "Hello Julian"} + newPatchOpts := &client.PatchOptions{} + o.ApplyToPatch(newPatchOpts) + Expect(newPatchOpts).To(Equal(o)) + }) + It("Should set Raw", func() { + o := &client.PatchOptions{Raw: &metav1.PatchOptions{}} + newPatchOpts := &client.PatchOptions{} + o.ApplyToPatch(newPatchOpts) + Expect(newPatchOpts).To(Equal(o)) + }) + It("Should not set anything", func() { + o := &client.PatchOptions{} + newPatchOpts := &client.PatchOptions{} + o.ApplyToPatch(newPatchOpts) + Expect(newPatchOpts).To(Equal(o)) + }) +}) + +var _ = Describe("DeleteAllOfOptions", func() { + It("Should set ListOptions", func() { + o := &client.DeleteAllOfOptions{ListOptions: client.ListOptions{Raw: &metav1.ListOptions{}}} + newDeleteAllOfOpts := &client.DeleteAllOfOptions{} + o.ApplyToDeleteAllOf(newDeleteAllOfOpts) + Expect(newDeleteAllOfOpts).To(Equal(o)) + }) + It("Should set DeleteOptions", func() { + 
o := &client.DeleteAllOfOptions{DeleteOptions: client.DeleteOptions{GracePeriodSeconds: ptr.To(int64(44))}} + newDeleteAllOfOpts := &client.DeleteAllOfOptions{} + o.ApplyToDeleteAllOf(newDeleteAllOfOpts) + Expect(newDeleteAllOfOpts).To(Equal(o)) + }) +}) + +var _ = Describe("MatchingLabels", func() { + It("Should produce an invalid selector when given invalid input", func() { + matchingLabels := client.MatchingLabels(map[string]string{"k": "axahm2EJ8Phiephe2eixohbee9eGeiyees1thuozi1xoh0GiuH3diewi8iem7Nui"}) + listOpts := &client.ListOptions{} + matchingLabels.ApplyToList(listOpts) + + r, _ := listOpts.LabelSelector.Requirements() + _, err := labels.NewRequirement(r[0].Key(), r[0].Operator(), r[0].Values().List()) + Expect(err).To(HaveOccurred()) + expectedErrMsg := `values[0][k]: Invalid value: "axahm2EJ8Phiephe2eixohbee9eGeiyees1thuozi1xoh0GiuH3diewi8iem7Nui": must be no more than 63 characters` + Expect(err.Error()).To(Equal(expectedErrMsg)) + }) + + It("Should add matchingLabels to existing selector", func() { + listOpts := &client.ListOptions{} + + matchingLabels := client.MatchingLabels(map[string]string{"k": "v"}) + matchingLabels2 := client.MatchingLabels(map[string]string{"k2": "v2"}) + + matchingLabels.ApplyToList(listOpts) + Expect(listOpts.LabelSelector.String()).To(Equal("k=v")) + + matchingLabels2.ApplyToList(listOpts) + Expect(listOpts.LabelSelector.String()).To(Equal("k=v,k2=v2")) + }) +}) + +var _ = Describe("DryRunAll", func() { + It("Should apply to ApplyOptions", func() { + o := &client.ApplyOptions{DryRun: []string{"server"}} + t := client.DryRunAll + t.ApplyToApply(o) + Expect(o.DryRun).To(Equal([]string{metav1.DryRunAll})) + }) + It("Should apply to CreateOptions", func() { + o := &client.CreateOptions{DryRun: []string{"server"}} + t := client.DryRunAll + t.ApplyToCreate(o) + Expect(o.DryRun).To(Equal([]string{metav1.DryRunAll})) + }) + It("Should apply to UpdateOptions", func() { + o := &client.UpdateOptions{DryRun: []string{"server"}} + t := 
client.DryRunAll + t.ApplyToUpdate(o) + Expect(o.DryRun).To(Equal([]string{metav1.DryRunAll})) + }) + It("Should apply to PatchOptions", func() { + o := &client.PatchOptions{DryRun: []string{"server"}} + t := client.DryRunAll + t.ApplyToPatch(o) + Expect(o.DryRun).To(Equal([]string{metav1.DryRunAll})) + }) + It("Should apply to DeleteOptions", func() { + o := &client.DeleteOptions{DryRun: []string{"server"}} + t := client.DryRunAll + t.ApplyToDelete(o) + Expect(o.DryRun).To(Equal([]string{metav1.DryRunAll})) + }) + It("Should apply to SubResourcePatchOptions", func() { + o := &client.SubResourcePatchOptions{PatchOptions: client.PatchOptions{DryRun: []string{"server"}}} + t := client.DryRunAll + t.ApplyToSubResourcePatch(o) + Expect(o.DryRun).To(Equal([]string{metav1.DryRunAll})) + }) + It("Should apply to SubResourceCreateOptions", func() { + o := &client.SubResourceCreateOptions{CreateOptions: client.CreateOptions{DryRun: []string{"server"}}} + t := client.DryRunAll + t.ApplyToSubResourceCreate(o) + Expect(o.DryRun).To(Equal([]string{metav1.DryRunAll})) + }) + It("Should apply to SubResourceUpdateOptions", func() { + o := &client.SubResourceUpdateOptions{UpdateOptions: client.UpdateOptions{DryRun: []string{"server"}}} + t := client.DryRunAll + t.ApplyToSubResourceUpdate(o) + Expect(o.DryRun).To(Equal([]string{metav1.DryRunAll})) + }) + It("Should apply to SubResourceApplyOptions", func() { + o := &client.SubResourceApplyOptions{ApplyOptions: client.ApplyOptions{DryRun: []string{"server"}}} + t := client.DryRunAll + t.ApplyToSubResourceApply(o) + Expect(o.DryRun).To(Equal([]string{metav1.DryRunAll})) + }) +}) + +var _ = Describe("FieldOwner", func() { + It("Should apply to PatchOptions", func() { + o := &client.PatchOptions{FieldManager: "bar"} + t := client.FieldOwner("foo") + t.ApplyToPatch(o) + Expect(o.FieldManager).To(Equal("foo")) + }) + It("Should apply to ApplyOptions", func() { + o := &client.ApplyOptions{FieldManager: "bar"} + t := 
client.FieldOwner("foo") + t.ApplyToApply(o) + Expect(o.FieldManager).To(Equal("foo")) + }) + It("Should apply to CreateOptions", func() { + o := &client.CreateOptions{FieldManager: "bar"} + t := client.FieldOwner("foo") + t.ApplyToCreate(o) + Expect(o.FieldManager).To(Equal("foo")) + }) + It("Should apply to UpdateOptions", func() { + o := &client.UpdateOptions{FieldManager: "bar"} + t := client.FieldOwner("foo") + t.ApplyToUpdate(o) + Expect(o.FieldManager).To(Equal("foo")) + }) + It("Should apply to SubResourcePatchOptions", func() { + o := &client.SubResourcePatchOptions{PatchOptions: client.PatchOptions{FieldManager: "bar"}} + t := client.FieldOwner("foo") + t.ApplyToSubResourcePatch(o) + Expect(o.FieldManager).To(Equal("foo")) + }) + It("Should apply to SubResourceCreateOptions", func() { + o := &client.SubResourceCreateOptions{CreateOptions: client.CreateOptions{FieldManager: "bar"}} + t := client.FieldOwner("foo") + t.ApplyToSubResourceCreate(o) + Expect(o.FieldManager).To(Equal("foo")) + }) + It("Should apply to SubResourceUpdateOptions", func() { + o := &client.SubResourceUpdateOptions{UpdateOptions: client.UpdateOptions{FieldManager: "bar"}} + t := client.FieldOwner("foo") + t.ApplyToSubResourceUpdate(o) + Expect(o.FieldManager).To(Equal("foo")) + }) + It("Should apply to SubResourceApplyOptions", func() { + o := &client.SubResourceApplyOptions{ApplyOptions: client.ApplyOptions{FieldManager: "bar"}} + t := client.FieldOwner("foo") + t.ApplyToSubResourceApply(o) + Expect(o.FieldManager).To(Equal("foo")) + }) +}) + +var _ = Describe("ForceOwnership", func() { + It("Should apply to PatchOptions", func() { + o := &client.PatchOptions{} + t := client.ForceOwnership + t.ApplyToPatch(o) + Expect(*o.Force).To(BeTrue()) + }) + It("Should apply to SubResourcePatchOptions", func() { + o := &client.SubResourcePatchOptions{PatchOptions: client.PatchOptions{}} + t := client.ForceOwnership + t.ApplyToSubResourcePatch(o) + Expect(*o.Force).To(BeTrue()) + }) + It("Should 
apply to ApplyOptions", func() { + o := &client.ApplyOptions{} + t := client.ForceOwnership + t.ApplyToApply(o) + Expect(*o.Force).To(BeTrue()) + }) + It("Should apply to SubResourceApplyOptions", func() { + o := &client.SubResourceApplyOptions{} + t := client.ForceOwnership + t.ApplyToSubResourceApply(o) + Expect(*o.Force).To(BeTrue()) + }) +}) + +var _ = Describe("HasLabels", func() { + It("Should produce hasLabels in given order", func() { + listOpts := &client.ListOptions{} + + hasLabels := client.HasLabels([]string{"labelApe", "labelFox"}) + hasLabels.ApplyToList(listOpts) + Expect(listOpts.LabelSelector.String()).To(Equal("labelApe,labelFox")) + }) + + It("Should add hasLabels to existing hasLabels selector", func() { + listOpts := &client.ListOptions{} + + hasLabel := client.HasLabels([]string{"labelApe"}) + hasLabel.ApplyToList(listOpts) + + hasOtherLabel := client.HasLabels([]string{"labelFox"}) + hasOtherLabel.ApplyToList(listOpts) + Expect(listOpts.LabelSelector.String()).To(Equal("labelApe,labelFox")) + }) + + It("Should add hasLabels to existing matchingLabels", func() { + listOpts := &client.ListOptions{} + + matchingLabels := client.MatchingLabels(map[string]string{"k": "v"}) + matchingLabels.ApplyToList(listOpts) + + hasLabel := client.HasLabels([]string{"labelApe"}) + hasLabel.ApplyToList(listOpts) + Expect(listOpts.LabelSelector.String()).To(Equal("k=v,labelApe")) + }) +}) diff --git a/pkg/client/patch.go b/pkg/client/patch.go index ab7efc287b..9bd0953fdc 100644 --- a/pkg/client/patch.go +++ b/pkg/client/patch.go @@ -17,15 +17,23 @@ limitations under the License. package client import ( - jsonpatch "github.com/evanphx/json-patch" - "k8s.io/apimachinery/pkg/runtime" + "fmt" + + jsonpatch "github.com/evanphx/json-patch/v5" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/strategicpatch" ) var ( // Apply uses server-side apply to patch the given object. 
- Apply = applyPatch{} + // + // Deprecated: Use client.Client.Apply() and client.Client.SubResource("subresource").Apply() instead. + Apply Patch = applyPatch{} + + // Merge uses the raw object as a merge patch, without modifications. + // Use MergeFrom if you wish to compute a diff instead. + Merge Patch = mergePatch{} ) type patch struct { @@ -39,42 +47,154 @@ func (s *patch) Type() types.PatchType { } // Data implements Patch. -func (s *patch) Data(obj runtime.Object) ([]byte, error) { +func (s *patch) Data(obj Object) ([]byte, error) { return s.data, nil } -// ConstantPatch constructs a new Patch with the given PatchType and data. -func ConstantPatch(patchType types.PatchType, data []byte) Patch { +// RawPatch constructs a new Patch with the given PatchType and data. +func RawPatch(patchType types.PatchType, data []byte) Patch { return &patch{patchType, data} } +// MergeFromWithOptimisticLock can be used if clients want to make sure a patch +// is being applied to the latest resource version of an object. +// +// The behavior is similar to what an Update would do, without the need to send the +// whole object. Usually this method is useful if you might have multiple clients +// acting on the same object and the same API version, but with different versions of the Go structs. +// +// For example, an "older" copy of a Widget that has fields A and B, and a "newer" copy with A, B, and C. +// Sending an update using the older struct definition results in C being dropped, whereas using a patch does not. +type MergeFromWithOptimisticLock struct{} + +// ApplyToMergeFrom applies this configuration to the given patch options. +func (m MergeFromWithOptimisticLock) ApplyToMergeFrom(in *MergeFromOptions) { + in.OptimisticLock = true +} + +// MergeFromOption is some configuration that modifies options for a merge-from patch data. +type MergeFromOption interface { + // ApplyToMergeFrom applies this configuration to the given patch options. 
+ ApplyToMergeFrom(*MergeFromOptions) +} + +// MergeFromOptions contains options to generate a merge-from patch data. +type MergeFromOptions struct { + // OptimisticLock, when true, includes `metadata.resourceVersion` into the final + // patch data. If the `resourceVersion` field doesn't match what's stored, + // the operation results in a conflict and clients will need to try again. + OptimisticLock bool +} + type mergeFromPatch struct { - from runtime.Object + patchType types.PatchType + createPatch func(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error) + from Object + opts MergeFromOptions } -// Type implements patch. +// Type implements Patch. func (s *mergeFromPatch) Type() types.PatchType { - return types.MergePatchType + return s.patchType } // Data implements Patch. -func (s *mergeFromPatch) Data(obj runtime.Object) ([]byte, error) { - originalJSON, err := json.Marshal(s.from) +func (s *mergeFromPatch) Data(obj Object) ([]byte, error) { + original := s.from + modified := obj + + if s.opts.OptimisticLock { + version := original.GetResourceVersion() + if len(version) == 0 { + return nil, fmt.Errorf("cannot use OptimisticLock, object %q does not have any resource version we can use", original) + } + + original = original.DeepCopyObject().(Object) + original.SetResourceVersion("") + + modified = modified.DeepCopyObject().(Object) + modified.SetResourceVersion(version) + } + + originalJSON, err := json.Marshal(original) if err != nil { return nil, err } - modifiedJSON, err := json.Marshal(obj) + modifiedJSON, err := json.Marshal(modified) if err != nil { return nil, err } + data, err := s.createPatch(originalJSON, modifiedJSON, obj) + if err != nil { + return nil, err + } + + return data, nil +} + +func createMergePatch(originalJSON, modifiedJSON []byte, _ interface{}) ([]byte, error) { return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) } +func createStrategicMergePatch(originalJSON, modifiedJSON []byte, dataStruct 
interface{}) ([]byte, error) { + return strategicpatch.CreateTwoWayMergePatch(originalJSON, modifiedJSON, dataStruct) +} + // MergeFrom creates a Patch that patches using the merge-patch strategy with the given object as base. -func MergeFrom(obj runtime.Object) Patch { - return &mergeFromPatch{obj} +// The difference between MergeFrom and StrategicMergeFrom lays in the handling of modified list fields. +// When using MergeFrom, existing lists will be completely replaced by new lists. +// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type, +// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`. +// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on +// the difference between merge-patch and strategic-merge-patch. +func MergeFrom(obj Object) Patch { + return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj} +} + +// MergeFromWithOptions creates a Patch that patches using the merge-patch strategy with the given object as base. +// See MergeFrom for more details. +func MergeFromWithOptions(obj Object, opts ...MergeFromOption) Patch { + options := &MergeFromOptions{} + for _, opt := range opts { + opt.ApplyToMergeFrom(options) + } + return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj, opts: *options} +} + +// StrategicMergeFrom creates a Patch that patches using the strategic-merge-patch strategy with the given object as base. +// The difference between MergeFrom and StrategicMergeFrom lays in the handling of modified list fields. +// When using MergeFrom, existing lists will be completely replaced by new lists. +// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type, +// e.g. 
the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`. +// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on +// the difference between merge-patch and strategic-merge-patch. +// Please note, that CRDs don't support strategic-merge-patch, see +// https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility +func StrategicMergeFrom(obj Object, opts ...MergeFromOption) Patch { + options := &MergeFromOptions{} + for _, opt := range opts { + opt.ApplyToMergeFrom(options) + } + return &mergeFromPatch{patchType: types.StrategicMergePatchType, createPatch: createStrategicMergePatch, from: obj, opts: *options} +} + +// mergePatch uses a raw merge strategy to patch the object. +type mergePatch struct{} + +// Type implements Patch. +func (p mergePatch) Type() types.PatchType { + return types.MergePatchType +} + +// Data implements Patch. +func (p mergePatch) Data(obj Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). + return json.Marshal(obj) } // applyPatch uses server-side apply to patch the object. @@ -86,8 +206,8 @@ func (p applyPatch) Type() types.PatchType { } // Data implements Patch. -func (p applyPatch) Data(obj runtime.Object) ([]byte, error) { - // NB(directxman12): we might techically want to be using an actual encoder +func (p applyPatch) Data(obj Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder // here (in case some more performant encoder is introduced) but this is // correct and sufficient for our uses (it's what the JSON serializer in // client-go does, more-or-less). 
diff --git a/pkg/client/patch_test.go b/pkg/client/patch_test.go new file mode 100644 index 0000000000..c9e105ae51 --- /dev/null +++ b/pkg/client/patch_test.go @@ -0,0 +1,122 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func BenchmarkMergeFrom(b *testing.B) { + cm1 := &corev1.ConfigMap{} + cm1.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) + cm1.ResourceVersion = "anything" + + cm2 := cm1.DeepCopy() + cm2.Data = map[string]string{"key": "value"} + + sts1 := &appsv1.StatefulSet{} + sts1.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("StatefulSet")) + sts1.ResourceVersion = "somesuch" + + sts2 := sts1.DeepCopy() + sts2.Spec.Template.Spec.Containers = []corev1.Container{{ + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("1m"), + corev1.ResourceMemory: resource.MustParse("1M"), + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{}, + }, + }, + Lifecycle: &corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ + HTTPGet: &corev1.HTTPGetAction{}, + }, 
+ }, + SecurityContext: &corev1.SecurityContext{}, + }} + + b.Run("NoOptions", func(b *testing.B) { + cmPatch := MergeFrom(cm1) + if _, err := cmPatch.Data(cm2); err != nil { + b.Fatalf("expected no error, got %v", err) + } + + stsPatch := MergeFrom(sts1) + if _, err := stsPatch.Data(sts2); err != nil { + b.Fatalf("expected no error, got %v", err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = cmPatch.Data(cm2) + _, _ = stsPatch.Data(sts2) + } + }) + + b.Run("WithOptimisticLock", func(b *testing.B) { + cmPatch := MergeFromWithOptions(cm1, MergeFromWithOptimisticLock{}) + if _, err := cmPatch.Data(cm2); err != nil { + b.Fatalf("expected no error, got %v", err) + } + + stsPatch := MergeFromWithOptions(sts1, MergeFromWithOptimisticLock{}) + if _, err := stsPatch.Data(sts2); err != nil { + b.Fatalf("expected no error, got %v", err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = cmPatch.Data(cm2) + _, _ = stsPatch.Data(sts2) + } + }) +} + +var _ = Describe("MergeFrom", func() { + It("should successfully create a patch for two large and similar int64s", func() { + var largeInt64 int64 = 9223372036854775807 + var similarLargeInt64 int64 = 9223372036854775800 + j := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "test", + }, + Spec: batchv1.JobSpec{ + ActiveDeadlineSeconds: &largeInt64, + }, + } + patch := MergeFrom(j.DeepCopy()) + + j.Spec.ActiveDeadlineSeconds = &similarLargeInt64 + + data, err := patch.Data(&j) + Expect(err).NotTo(HaveOccurred()) + Expect(data).To(Equal([]byte(`{"spec":{"activeDeadlineSeconds":9223372036854775800}}`))) + }) +}) diff --git a/pkg/client/split.go b/pkg/client/split.go deleted file mode 100644 index db7f16a717..0000000000 --- a/pkg/client/split.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "context" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" -) - -// DelegatingClient forms a Client by composing separate reader, writer and -// statusclient interfaces. This way, you can have an Client that reads from a -// cache and writes to the API server. -type DelegatingClient struct { - Reader - Writer - StatusClient -} - -// DelegatingReader forms a Reader that will cause Get and List requests for -// unstructured types to use the ClientReader while requests for any other type -// of object with use the CacheReader. This avoids accidentally caching the -// entire cluster in the common case of loading arbitrary unstructured objects -// (e.g. from OwnerReferences). -type DelegatingReader struct { - CacheReader Reader - ClientReader Reader -} - -// Get retrieves an obj for a given object key from the Kubernetes Cluster. -func (d *DelegatingReader) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { - _, isUnstructured := obj.(*unstructured.Unstructured) - if isUnstructured { - return d.ClientReader.Get(ctx, key, obj) - } - return d.CacheReader.Get(ctx, key, obj) -} - -// List retrieves list of objects for a given namespace and list options. -func (d *DelegatingReader) List(ctx context.Context, list runtime.Object, opts ...ListOptionFunc) error { - _, isUnstructured := list.(*unstructured.UnstructuredList) - if isUnstructured { - return d.ClientReader.List(ctx, list, opts...) - } - return d.CacheReader.List(ctx, list, opts...) 
-} diff --git a/pkg/client/testdata/examplecrd.yaml b/pkg/client/testdata/examplecrd.yaml new file mode 100644 index 0000000000..5409ee9789 --- /dev/null +++ b/pkg/client/testdata/examplecrd.yaml @@ -0,0 +1,17 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: chaospods.chaosapps.metamagical.io +spec: + group: chaosapps.metamagical.io + names: + kind: ChaosPod + plural: chaospods + scope: Namespaced + versions: + - name: "v1" + storage: true + served: true + schema: + openAPIV3Schema: + type: object diff --git a/pkg/client/typed_client.go b/pkg/client/typed_client.go index 76f429b650..66ae2e4a5c 100644 --- a/pkg/client/typed_client.go +++ b/pkg/client/typed_client.go @@ -18,77 +18,100 @@ package client import ( "context" + "fmt" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/util/apply" ) -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. +var _ Reader = &typedClient{} +var _ Writer = &typedClient{} + type typedClient struct { - cache clientCache + resources *clientRestResources paramCodec runtime.ParameterCodec } -// Create implements client.Client -func (c *typedClient) Create(ctx context.Context, obj runtime.Object, opts ...CreateOptionFunc) error { - o, err := c.cache.getObjMeta(obj) +// Create implements client.Client. +func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + o, err := c.resources.getObjMeta(obj) if err != nil { return err } createOpts := &CreateOptions{} createOpts.ApplyOptions(opts) + return o.Post(). - NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + NamespaceIfScoped(o.namespace, o.isNamespaced()). Resource(o.resource()). Body(obj). VersionedParams(createOpts.AsCreateOptions(), c.paramCodec). - Context(ctx). - Do(). + Do(ctx). 
Into(obj) } -// Update implements client.Client -func (c *typedClient) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { - o, err := c.cache.getObjMeta(obj) +// Update implements client.Client. +func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + o, err := c.resources.getObjMeta(obj) if err != nil { return err } updateOpts := &UpdateOptions{} updateOpts.ApplyOptions(opts) + return o.Put(). - NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + NamespaceIfScoped(o.namespace, o.isNamespaced()). Resource(o.resource()). - Name(o.GetName()). + Name(o.name). Body(obj). VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec). - Context(ctx). - Do(). + Do(ctx). Into(obj) } -// Delete implements client.Client -func (c *typedClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error { - o, err := c.cache.getObjMeta(obj) +// Delete implements client.Client. +func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + o, err := c.resources.getObjMeta(obj) if err != nil { return err } deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Name(o.name). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client. +func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + o, err := c.resources.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + return o.Delete(). - NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). Resource(o.resource()). - Name(o.GetName()). - Body(deleteOpts.ApplyOptions(opts).AsDeleteOptions()). - Context(ctx). - Do(). 
+ VersionedParams(deleteAllOfOpts.AsListOptions(), c.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). Error() } -// Patch implements client.Client -func (c *typedClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { - o, err := c.cache.getObjMeta(obj) +// Patch implements client.Client. +func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -99,50 +122,126 @@ func (c *typedClient) Patch(ctx context.Context, obj runtime.Object, patch Patch } patchOpts := &PatchOptions{} + patchOpts.ApplyOptions(opts) + return o.Patch(patch.Type()). - NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + NamespaceIfScoped(o.namespace, o.isNamespaced()). Resource(o.resource()). - Name(o.GetName()). - VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Name(o.name). + VersionedParams(patchOpts.AsPatchOptions(), c.paramCodec). Body(data). - Context(ctx). - Do(). + Do(ctx). Into(obj) } -// Get implements client.Client -func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error { - r, err := c.cache.getResource(obj) +func (c *typedClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error { + o, err := c.resources.getObjMeta(obj) if err != nil { return err } + req, err := apply.NewRequest(o, obj) + if err != nil { + return fmt.Errorf("failed to create apply request: %w", err) + } + applyOpts := &ApplyOptions{} + applyOpts.ApplyOptions(opts) + + return req. + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Name(o.name). + VersionedParams(applyOpts.AsPatchOptions(), c.paramCodec). + Do(ctx). + // This is hacky, it is required because `Into` takes a `runtime.Object` and + // that is not implemented by the ApplyConfigurations. 
The generated clients + // don't have this problem because they deserialize into the api type, not the + // apply configuration: https://github.com/kubernetes/kubernetes/blob/22f5e01a37c0bc6a5f494dec14dd4e3688ee1d55/staging/src/k8s.io/client-go/gentype/type.go#L296-L317 + Into(runtimeObjectFromApplyConfiguration(obj)) +} + +// Get implements client.Client. +func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + r, err := c.resources.getResource(obj) + if err != nil { + return err + } + getOpts := GetOptions{} + getOpts.ApplyOptions(opts) return r.Get(). NamespaceIfScoped(key.Namespace, r.isNamespaced()). Resource(r.resource()). - Context(ctx). - Name(key.Name).Do().Into(obj) + VersionedParams(getOpts.AsGetOptions(), c.paramCodec). + Name(key.Name).Do(ctx).Into(obj) } -// List implements client.Client -func (c *typedClient) List(ctx context.Context, obj runtime.Object, opts ...ListOptionFunc) error { - r, err := c.cache.getResource(obj) +// List implements client.Client. +func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + r, err := c.resources.getResource(obj) if err != nil { return err } + listOpts := ListOptions{} listOpts.ApplyOptions(opts) + return r.Get(). NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). Resource(r.resource()). VersionedParams(listOpts.AsListOptions(), c.paramCodec). - Context(ctx). - Do(). + Do(ctx). Into(obj) } -// UpdateStatus used by StatusWriter to write status. 
-func (c *typedClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { - o, err := c.cache.getObjMeta(obj) +func (c *typedClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error { + o, err := c.resources.getObjMeta(obj) + if err != nil { + return err + } + + if subResourceObj.GetName() == "" { + subResourceObj.SetName(obj.GetName()) + } + + getOpts := &SubResourceGetOptions{} + getOpts.ApplyOptions(opts) + + return o.Get(). + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Name(o.name). + SubResource(subResource). + VersionedParams(getOpts.AsGetOptions(), c.paramCodec). + Do(ctx). + Into(subResourceObj) +} + +func (c *typedClient) CreateSubResource(ctx context.Context, obj Object, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error { + o, err := c.resources.getObjMeta(obj) + if err != nil { + return err + } + + if subResourceObj.GetName() == "" { + subResourceObj.SetName(obj.GetName()) + } + + createOpts := &SubResourceCreateOptions{} + createOpts.ApplyOptions(opts) + + return o.Post(). + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Name(o.name). + SubResource(subResource). + Body(subResourceObj). + VersionedParams(createOpts.AsCreateOptions(), c.paramCodec). + Do(ctx). + Into(subResourceObj) +} + +// UpdateSubResource used by SubResourceWriter to write status. +func (c *typedClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error { + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -150,39 +249,91 @@ func (c *typedClient) UpdateStatus(ctx context.Context, obj runtime.Object, opts // wrapped to improve the UX ? 
// It will be nice to receive an error saying the object doesn't implement // status subresource and check CRD definition + updateOpts := &SubResourceUpdateOptions{} + updateOpts.ApplyOptions(opts) + + body := obj + if updateOpts.SubResourceBody != nil { + body = updateOpts.SubResourceBody + } + if body.GetName() == "" { + body.SetName(obj.GetName()) + } + if body.GetNamespace() == "" { + body.SetNamespace(obj.GetNamespace()) + } + return o.Put(). - NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + NamespaceIfScoped(o.namespace, o.isNamespaced()). Resource(o.resource()). - Name(o.GetName()). - SubResource("status"). - Body(obj). - VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), c.paramCodec). - Context(ctx). - Do(). - Into(obj) + Name(o.name). + SubResource(subResource). + Body(body). + VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec). + Do(ctx). + Into(body) } -// PatchStatus used by StatusWriter to write status. -func (c *typedClient) PatchStatus(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { - o, err := c.cache.getObjMeta(obj) +// PatchSubResource used by SubResourceWriter to write subresource. +func (c *typedClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { + o, err := c.resources.getObjMeta(obj) if err != nil { return err } - data, err := patch.Data(obj) + patchOpts := &SubResourcePatchOptions{} + patchOpts.ApplyOptions(opts) + + body := obj + if patchOpts.SubResourceBody != nil { + body = patchOpts.SubResourceBody + } + + data, err := patch.Data(body) if err != nil { return err } - patchOpts := &PatchOptions{} return o.Patch(patch.Type()). - NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + NamespaceIfScoped(o.namespace, o.isNamespaced()). Resource(o.resource()). - Name(o.GetName()). - SubResource("status"). + Name(o.name). + SubResource(subResource). Body(data). 
- VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). - Context(ctx). - Do(). - Into(obj) + VersionedParams(patchOpts.AsPatchOptions(), c.paramCodec). + Do(ctx). + Into(body) +} + +func (c *typedClient) ApplySubResource(ctx context.Context, obj runtime.ApplyConfiguration, subResource string, opts ...SubResourceApplyOption) error { + o, err := c.resources.getObjMeta(obj) + if err != nil { + return err + } + + applyOpts := &SubResourceApplyOptions{} + applyOpts.ApplyOpts(opts) + + body := obj + if applyOpts.SubResourceBody != nil { + body = applyOpts.SubResourceBody + } + + req, err := apply.NewRequest(o, body) + if err != nil { + return fmt.Errorf("failed to create apply request: %w", err) + } + + return req. + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Name(o.name). + SubResource(subResource). + VersionedParams(applyOpts.AsPatchOptions(), c.paramCodec). + Do(ctx). + // This is hacky, it is required because `Into` takes a `runtime.Object` and + // that is not implemented by the ApplyConfigurations. 
The generated clients + // don't have this problem because they deserialize into the api type, not the + // apply configuration: https://github.com/kubernetes/kubernetes/blob/22f5e01a37c0bc6a5f494dec14dd4e3688ee1d55/staging/src/k8s.io/client-go/gentype/type.go#L296-L317 + Into(runtimeObjectFromApplyConfiguration(obj)) } diff --git a/pkg/client/unstructured_client.go b/pkg/client/unstructured_client.go index f13dd18854..d2ea6d7a32 100644 --- a/pkg/client/unstructured_client.go +++ b/pkg/client/unstructured_client.go @@ -21,83 +21,130 @@ import ( "fmt" "strings" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" + "k8s.io/client-go/util/apply" ) -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. +var _ Reader = &unstructuredClient{} +var _ Writer = &unstructuredClient{} + type unstructuredClient struct { - client dynamic.Interface - restMapper meta.RESTMapper + resources *clientRestResources + paramCodec runtime.ParameterCodec } -// Create implements client.Client -func (uc *unstructuredClient) Create(_ context.Context, obj runtime.Object, opts ...CreateOptionFunc) error { - u, ok := obj.(*unstructured.Unstructured) +// Create implements client.Client. 
+func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - createOpts := CreateOptions{} - createOpts.ApplyOptions(opts) - r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) - if err != nil { - return err - } - i, err := r.Create(u, *createOpts.AsCreateOptions()) + + gvk := u.GetObjectKind().GroupVersionKind() + + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } - u.Object = i.Object - return nil + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + + result := o.Post(). + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) + + u.GetObjectKind().SetGroupVersionKind(gvk) + return result } -// Update implements client.Client -func (uc *unstructuredClient) Update(_ context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { - u, ok := obj.(*unstructured.Unstructured) +// Update implements client.Client. +func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - updateOpts := UpdateOptions{} - updateOpts.ApplyOptions(opts) - r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + + gvk := u.GetObjectKind().GroupVersionKind() + + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } - i, err := r.Update(u, *updateOpts.AsUpdateOptions()) + + updateOpts := UpdateOptions{} + updateOpts.ApplyOptions(opts) + + result := o.Put(). + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Name(o.name). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec). + Do(ctx). 
+ Into(obj) + + u.GetObjectKind().SetGroupVersionKind(gvk) + return result +} + +// Delete implements client.Client. +func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + if _, ok := obj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } - u.Object = i.Object - return nil + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Name(o.name). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() } -// Delete implements client.Client -func (uc *unstructuredClient) Delete(_ context.Context, obj runtime.Object, opts ...DeleteOptionFunc) error { - u, ok := obj.(*unstructured.Unstructured) - if !ok { +// DeleteAllOf implements client.Client. +func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } - deleteOpts := DeleteOptions{} - err = r.Delete(u.GetName(), deleteOpts.ApplyOptions(opts).AsDeleteOptions()) - return err + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), uc.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). 
+ Error() } -// Patch implements client.Client -func (uc *unstructuredClient) Patch(_ context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { - u, ok := obj.(*unstructured.Unstructured) - if !ok { +// Patch implements client.Client. +func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -108,105 +155,266 @@ func (uc *unstructuredClient) Patch(_ context.Context, obj runtime.Object, patch } patchOpts := &PatchOptions{} - i, err := r.Patch(u.GetName(), patch.Type(), data, *patchOpts.ApplyOptions(opts).AsPatchOptions()) + patchOpts.ApplyOptions(opts) + + return o.Patch(patch.Type()). + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Name(o.name). + VersionedParams(patchOpts.AsPatchOptions(), uc.paramCodec). + Body(data). + Do(ctx). + Into(obj) +} + +func (uc *unstructuredClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error { + unstructuredApplyConfig, ok := obj.(*unstructuredApplyConfiguration) + if !ok { + return fmt.Errorf("bug: unstructured client got an applyconfiguration that was not %T but %T", &unstructuredApplyConfiguration{}, obj) + } + o, err := uc.resources.getObjMeta(unstructuredApplyConfig.Unstructured) if err != nil { return err } - u.Object = i.Object - return nil + + req, err := apply.NewRequest(o, obj) + if err != nil { + return fmt.Errorf("failed to create apply request: %w", err) + } + applyOpts := &ApplyOptions{} + applyOpts.ApplyOptions(opts) + + return req. + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Name(o.name). + VersionedParams(applyOpts.AsPatchOptions(), uc.paramCodec). + Do(ctx). 
+ Into(unstructuredApplyConfig.Unstructured) } -// Get implements client.Client -func (uc *unstructuredClient) Get(_ context.Context, key ObjectKey, obj runtime.Object) error { - u, ok := obj.(*unstructured.Unstructured) +// Get implements client.Client. +func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - r, err := uc.getResourceInterface(u.GroupVersionKind(), key.Namespace) - if err != nil { - return err - } - i, err := r.Get(key.Name, metav1.GetOptions{}) + + gvk := u.GetObjectKind().GroupVersionKind() + + getOpts := GetOptions{} + getOpts.ApplyOptions(opts) + + r, err := uc.resources.getResource(obj) if err != nil { return err } - u.Object = i.Object - return nil + + result := r.Get(). + NamespaceIfScoped(key.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(getOpts.AsGetOptions(), uc.paramCodec). + Name(key.Name). + Do(ctx). + Into(obj) + + u.GetObjectKind().SetGroupVersionKind(gvk) + + return result } -// List implements client.Client -func (uc *unstructuredClient) List(_ context.Context, obj runtime.Object, opts ...ListOptionFunc) error { - u, ok := obj.(*unstructured.UnstructuredList) +// List implements client.Client. 
+func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() - if strings.HasSuffix(gvk.Kind, "List") { - gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + + gvk := u.GetObjectKind().GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + r, err := uc.resources.getResource(obj) + if err != nil { + return err } + listOpts := ListOptions{} listOpts.ApplyOptions(opts) - r, err := uc.getResourceInterface(gvk, listOpts.Namespace) - if err != nil { - return err + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), uc.paramCodec). + Do(ctx). + Into(obj) +} + +func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error { + if _, ok := obj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + if _, ok := subResourceObj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) + } + + if subResourceObj.GetName() == "" { + subResourceObj.SetName(obj.GetName()) } - i, err := r.List(*listOpts.AsListOptions()) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } - u.Items = i.Items - u.Object = i.Object - return nil + + getOpts := &SubResourceGetOptions{} + getOpts.ApplyOptions(opts) + + return o.Get(). + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Name(o.name). + SubResource(subResource). + VersionedParams(getOpts.AsGetOptions(), uc.paramCodec). + Do(ctx). 
+ Into(subResourceObj) } -func (uc *unstructuredClient) UpdateStatus(_ context.Context, obj runtime.Object, opts ...UpdateOptionFunc) error { - u, ok := obj.(*unstructured.Unstructured) - if !ok { +func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + + if _, ok := subResourceObj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) + } + + if subResourceObj.GetName() == "" { + subResourceObj.SetName(obj.GetName()) + } + + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } - i, err := r.UpdateStatus(u, *(&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions()) + + createOpts := &SubResourceCreateOptions{} + createOpts.ApplyOptions(opts) + + return o.Post(). + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Name(o.name). + SubResource(subResource). + Body(subResourceObj). + VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec). + Do(ctx). 
+ Into(subResourceObj) +} + +func (uc *unstructuredClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error { + if _, ok := obj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } - u.Object = i.Object - return nil + + updateOpts := SubResourceUpdateOptions{} + updateOpts.ApplyOptions(opts) + + body := obj + if updateOpts.SubResourceBody != nil { + body = updateOpts.SubResourceBody + } + if body.GetName() == "" { + body.SetName(obj.GetName()) + } + if body.GetNamespace() == "" { + body.SetNamespace(obj.GetNamespace()) + } + + return o.Put(). + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Name(o.name). + SubResource(subResource). + Body(body). + VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(body) } -func (uc *unstructuredClient) PatchStatus(_ context.Context, obj runtime.Object, patch Patch, opts ...PatchOptionFunc) error { - u, ok := obj.(*unstructured.Unstructured) +func (uc *unstructuredClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - r, err := uc.getResourceInterface(u.GroupVersionKind(), u.GetNamespace()) + + gvk := u.GetObjectKind().GroupVersionKind() + + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } - data, err := patch.Data(obj) - if err != nil { - return err + patchOpts := &SubResourcePatchOptions{} + patchOpts.ApplyOptions(opts) + + body := obj + if patchOpts.SubResourceBody != nil { + body = patchOpts.SubResourceBody } - i, err := r.Patch(u.GetName(), patch.Type(), data, *(&PatchOptions{}).ApplyOptions(opts).AsPatchOptions(), "status") + data, err := patch.Data(body) if err 
!= nil { return err } - u.Object = i.Object - return nil + + result := o.Patch(patch.Type()). + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Name(o.name). + SubResource(subResource). + Body(data). + VersionedParams(patchOpts.AsPatchOptions(), uc.paramCodec). + Do(ctx). + Into(body) + + u.GetObjectKind().SetGroupVersionKind(gvk) + return result } -func (uc *unstructuredClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (dynamic.ResourceInterface, error) { - mapping, err := uc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) +func (uc *unstructuredClient) ApplySubResource(ctx context.Context, obj runtime.ApplyConfiguration, subResource string, opts ...SubResourceApplyOption) error { + unstructuredApplyConfig, ok := obj.(*unstructuredApplyConfiguration) + if !ok { + return fmt.Errorf("bug: unstructured client got an applyconfiguration that was not %T but %T", &unstructuredApplyConfiguration{}, obj) + } + o, err := uc.resources.getObjMeta(unstructuredApplyConfig.Unstructured) if err != nil { - return nil, err + return err + } + + applyOpts := &SubResourceApplyOptions{} + applyOpts.ApplyOpts(opts) + + body := obj + if applyOpts.SubResourceBody != nil { + body = applyOpts.SubResourceBody } - if mapping.Scope.Name() == meta.RESTScopeNameRoot { - return uc.client.Resource(mapping.Resource), nil + req, err := apply.NewRequest(o, body) + if err != nil { + return fmt.Errorf("failed to create apply request: %w", err) } - return uc.client.Resource(mapping.Resource).Namespace(ns), nil + + return req. + NamespaceIfScoped(o.namespace, o.isNamespaced()). + Resource(o.resource()). + Name(o.name). + SubResource(subResource). + VersionedParams(applyOpts.AsPatchOptions(), uc.paramCodec). + Do(ctx). 
+ Into(unstructuredApplyConfig.Unstructured) } diff --git a/pkg/client/watch.go b/pkg/client/watch.go new file mode 100644 index 0000000000..181b22a673 --- /dev/null +++ b/pkg/client/watch.go @@ -0,0 +1,106 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" +) + +// NewWithWatch returns a new WithWatch. +func NewWithWatch(config *rest.Config, options Options) (WithWatch, error) { + client, err := newClient(config, options) + if err != nil { + return nil, err + } + return &watchingClient{client: client}, nil +} + +type watchingClient struct { + *client +} + +func (w *watchingClient) Watch(ctx context.Context, list ObjectList, opts ...ListOption) (watch.Interface, error) { + switch l := list.(type) { + case runtime.Unstructured: + return w.unstructuredWatch(ctx, l, opts...) + case *metav1.PartialObjectMetadataList: + return w.metadataWatch(ctx, l, opts...) + default: + return w.typedWatch(ctx, l, opts...) 
+ } +} + +func (w *watchingClient) listOpts(opts ...ListOption) ListOptions { + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + if listOpts.Raw == nil { + listOpts.Raw = &metav1.ListOptions{} + } + listOpts.Raw.Watch = true + + return listOpts +} + +func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialObjectMetadataList, opts ...ListOption) (watch.Interface, error) { + gvk := obj.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := w.listOpts(opts...) + + resInt, err := w.client.metadataClient.getResourceInterface(gvk, listOpts.Namespace) + if err != nil { + return nil, err + } + + return resInt.Watch(ctx, *listOpts.AsListOptions()) +} + +func (w *watchingClient) unstructuredWatch(ctx context.Context, obj runtime.Unstructured, opts ...ListOption) (watch.Interface, error) { + r, err := w.client.unstructuredClient.resources.getResource(obj) + if err != nil { + return nil, err + } + + listOpts := w.listOpts(opts...) + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), w.client.unstructuredClient.paramCodec). + Watch(ctx) +} + +func (w *watchingClient) typedWatch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) { + r, err := w.client.typedClient.resources.getResource(obj) + if err != nil { + return nil, err + } + + listOpts := w.listOpts(opts...) + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), w.client.typedClient.paramCodec). + Watch(ctx) +} diff --git a/pkg/client/watch_test.go b/pkg/client/watch_test.go new file mode 100644 index 0000000000..8d5b3344d3 --- /dev/null +++ b/pkg/client/watch_test.go @@ -0,0 +1,132 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client_test + +import ( + "context" + "fmt" + "sync/atomic" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("ClientWithWatch", func() { + var dep *appsv1.Deployment + var count uint64 = 0 + var replicaCount int32 = 2 + var ns = "kube-public" + + BeforeEach(func(ctx SpecContext) { + atomic.AddUint64(&count, 1) + dep = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("watch-deployment-name-%v", count), Namespace: ns, Labels: map[string]string{"app": fmt.Sprintf("bar-%v", count)}}, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + + var err error + dep, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func(ctx SpecContext) { + 
deleteDeployment(ctx, dep, ns) + }) + + Describe("NewWithWatch", func() { + It("should return a new Client", func(ctx SpecContext) { + cl, err := client.NewWithWatch(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + }) + + watchSuite := func(ctx context.Context, through client.ObjectList, expectedType client.Object, checkGvk bool) { + cl, err := client.NewWithWatch(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + watchInterface, err := cl.Watch(ctx, through, &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", dep.Name), + Namespace: dep.Namespace, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(watchInterface).NotTo(BeNil()) + + defer watchInterface.Stop() + + event, ok := <-watchInterface.ResultChan() + Expect(ok).To(BeTrue()) + Expect(event.Type).To(BeIdenticalTo(watch.Added)) + Expect(event.Object).To(BeAssignableToTypeOf(expectedType)) + + // The metadata client doesn't set GVK so we just use the + // name and UID as a proxy to confirm that we got the right + // object. 
+ metaObject, ok := event.Object.(metav1.Object) + Expect(ok).To(BeTrue()) + Expect(metaObject.GetName()).To(Equal(dep.Name)) + Expect(metaObject.GetUID()).To(Equal(dep.UID)) + + if checkGvk { + runtimeObject := event.Object + gvk := runtimeObject.GetObjectKind().GroupVersionKind() + Expect(gvk).To(Equal(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + })) + } + } + + It("should receive a create event when watching the typed object", func(ctx SpecContext) { + watchSuite(ctx, &appsv1.DeploymentList{}, &appsv1.Deployment{}, false) + }) + + It("should receive a create event when watching the unstructured object", func(ctx SpecContext) { + u := &unstructured.UnstructuredList{} + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Kind: "Deployment", + Version: "v1", + }) + watchSuite(ctx, u, &unstructured.Unstructured{}, true) + }) + + It("should receive a create event when watching the metadata object", func(ctx SpecContext) { + m := &metav1.PartialObjectMetadataList{TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"}} + watchSuite(ctx, m, &metav1.PartialObjectMetadata{}, false) + }) + }) + +}) diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go new file mode 100644 index 0000000000..0603f4cde5 --- /dev/null +++ b/pkg/cluster/cluster.go @@ -0,0 +1,302 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cluster + +import ( + "context" + "errors" + "net/http" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" +) + +// Cluster provides various methods to interact with a cluster. +type Cluster interface { + // GetHTTPClient returns an HTTP client that can be used to talk to the apiserver + GetHTTPClient() *http.Client + + // GetConfig returns an initialized Config + GetConfig() *rest.Config + + // GetCache returns a cache.Cache + GetCache() cache.Cache + + // GetScheme returns an initialized Scheme + GetScheme() *runtime.Scheme + + // GetClient returns a client configured with the Config. This client may + // not be a fully "direct" client -- it may read from a cache, for + // instance. See Options.NewClient for more information on how the default + // implementation works. + GetClient() client.Client + + // GetFieldIndexer returns a client.FieldIndexer configured with the client + GetFieldIndexer() client.FieldIndexer + + // GetEventRecorderFor returns a new EventRecorder for the provided name + GetEventRecorderFor(name string) record.EventRecorder + + // GetRESTMapper returns a RESTMapper + GetRESTMapper() meta.RESTMapper + + // GetAPIReader returns a reader that will be configured to use the API server directly. + // This should be used sparingly and only when the cached client does not fit your + // use case. + GetAPIReader() client.Reader + + // Start starts the cluster + Start(ctx context.Context) error +} + +// Options are the possible options that can be configured for a Cluster. 
+type Options struct { + // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources + // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better + // idea to pass your own scheme in. See the documentation in pkg/scheme for more information. + Scheme *runtime.Scheme + + // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs + MapperProvider func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) + + // Logger is the logger that should be used by this Cluster. + // If none is set, it defaults to log.Log global logger. + Logger logr.Logger + + // HTTPClient is the http client that will be used to create the default + // Cache and Client. If not set the rest.HTTPClientFor function will be used + // to create the http client. + HTTPClient *http.Client + + // Cache is the cache.Options that will be used to create the default Cache. + // By default, the cache will watch and list requested objects in all namespaces. + Cache cache.Options + + // NewCache is the function that will create the cache to be used + // by the manager. If not set this will use the default new cache function. + // + // When using a custom NewCache, the Cache options will be passed to the + // NewCache function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewCache if you know what you are doing. + NewCache cache.NewCacheFunc + + // Client is the client.Options that will be used to create the default Client. + // By default, the client will use the cache for reads and direct calls for writes. + Client client.Options + + // NewClient is the func that creates the client to be used by the manager. + // If not set this will create a Client backed by a Cache for read operations + // and a direct Client for write operations. + // + // When using a custom NewClient, the Client options will be passed to the + // NewClient function. + // + // NOTE: LOW LEVEL PRIMITIVE! 
+ // Only use a custom NewClient if you know what you are doing. + NewClient client.NewClientFunc + + // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API + // Use this to customize the event correlator and spam filter + // + // Deprecated: using this may cause goroutine leaks if the lifetime of your manager or controllers + // is shorter than the lifetime of your process. + EventBroadcaster record.EventBroadcaster + + // makeBroadcaster allows deferring the creation of the broadcaster to + // avoid leaking goroutines if we never call Start on this manager. It also + // returns whether or not this is a "owned" broadcaster, and as such should be + // stopped with the manager. + makeBroadcaster intrec.EventBroadcasterProducer + + // Dependency injection for testing + newRecorderProvider func(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) +} + +// Option can be used to manipulate Options. +type Option func(*Options) + +// New constructs a brand new cluster. 
+func New(config *rest.Config, opts ...Option) (Cluster, error) { + if config == nil { + return nil, errors.New("must specify Config") + } + + originalConfig := config + + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + options := Options{} + for _, opt := range opts { + opt(&options) + } + options, err := setOptionsDefaults(options, config) + if err != nil { + options.Logger.Error(err, "Failed to set defaults") + return nil, err + } + + // Create the mapper provider + mapper, err := options.MapperProvider(config, options.HTTPClient) + if err != nil { + options.Logger.Error(err, "Failed to get API Group-Resources") + return nil, err + } + + // Create the cache for the cached read client and registering informers + cacheOpts := options.Cache + { + if cacheOpts.Scheme == nil { + cacheOpts.Scheme = options.Scheme + } + if cacheOpts.Mapper == nil { + cacheOpts.Mapper = mapper + } + if cacheOpts.HTTPClient == nil { + cacheOpts.HTTPClient = options.HTTPClient + } + } + cache, err := options.NewCache(config, cacheOpts) + if err != nil { + return nil, err + } + + // Create the client, and default its options. + clientOpts := options.Client + { + if clientOpts.Scheme == nil { + clientOpts.Scheme = options.Scheme + } + if clientOpts.Mapper == nil { + clientOpts.Mapper = mapper + } + if clientOpts.HTTPClient == nil { + clientOpts.HTTPClient = options.HTTPClient + } + if clientOpts.Cache == nil { + clientOpts.Cache = &client.CacheOptions{ + Unstructured: false, + } + } + if clientOpts.Cache.Reader == nil { + clientOpts.Cache.Reader = cache + } + } + clientWriter, err := options.NewClient(config, clientOpts) + if err != nil { + return nil, err + } + + // Create the API Reader, a client with no cache. 
+ clientReader, err := client.New(config, client.Options{ + HTTPClient: options.HTTPClient, + Scheme: options.Scheme, + Mapper: mapper, + }) + if err != nil { + return nil, err + } + + // Create the recorder provider to inject event recorders for the components. + // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific + // to the particular controller that it's being injected into, rather than a generic one like is here. + recorderProvider, err := options.newRecorderProvider(config, options.HTTPClient, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster) + if err != nil { + return nil, err + } + + return &cluster{ + config: originalConfig, + httpClient: options.HTTPClient, + scheme: options.Scheme, + cache: cache, + fieldIndexes: cache, + client: clientWriter, + apiReader: clientReader, + recorderProvider: recorderProvider, + mapper: mapper, + logger: options.Logger, + }, nil +} + +// setOptionsDefaults set default values for Options fields. 
+func setOptionsDefaults(options Options, config *rest.Config) (Options, error) { + if options.HTTPClient == nil { + var err error + options.HTTPClient, err = rest.HTTPClientFor(config) + if err != nil { + return options, err + } + } + + // Use the Kubernetes client-go scheme if none is specified + if options.Scheme == nil { + options.Scheme = scheme.Scheme + } + + if options.MapperProvider == nil { + options.MapperProvider = apiutil.NewDynamicRESTMapper + } + + // Allow users to define how to create a new client + if options.NewClient == nil { + options.NewClient = client.New + } + + // Allow newCache to be mocked + if options.NewCache == nil { + options.NewCache = cache.New + } + + // Allow newRecorderProvider to be mocked + if options.newRecorderProvider == nil { + options.newRecorderProvider = intrec.NewProvider + } + + // This is duplicated with pkg/manager, we need it here to provide + // the user with an EventBroadcaster and there for the Leader election + if options.EventBroadcaster == nil { + // defer initialization to avoid leaking by default + options.makeBroadcaster = func() (record.EventBroadcaster, bool) { + return record.NewBroadcaster(), true + } + } else { + options.makeBroadcaster = func() (record.EventBroadcaster, bool) { + return options.EventBroadcaster, false + } + } + + if options.Logger.GetSink() == nil { + options.Logger = logf.RuntimeLog.WithName("cluster") + } + + return options, nil +} diff --git a/pkg/cluster/cluster_suite_test.go b/pkg/cluster/cluster_suite_test.go new file mode 100644 index 0000000000..dc1f9ac778 --- /dev/null +++ b/pkg/cluster/cluster_suite_test.go @@ -0,0 +1,68 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "net/http" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Cluster Suite") +} + +var testenv *envtest.Environment +var cfg *rest.Config +var clientset *kubernetes.Clientset + +// clientTransport is used to force-close keep-alives in tests that check for leaks. +var clientTransport *http.Transport + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + testenv = &envtest.Environment{} + + var err error + cfg, err = testenv.Start() + Expect(err).NotTo(HaveOccurred()) + + cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + // NB(directxman12): we can't set Transport *and* use TLS options, + // so we grab the transport right after it gets created so that we can + // type-assert on it (hopefully)? 
+ // hopefully this doesn't break 🤞 + clientTransport = rt.(*http.Transport) + return rt + } + + clientset, err = kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) +}) + +var _ = AfterSuite(func() { + Expect(testenv.Stop()).To(Succeed()) +}) diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go new file mode 100644 index 0000000000..c08a742403 --- /dev/null +++ b/pkg/cluster/cluster_test.go @@ -0,0 +1,170 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "go.uber.org/goleak" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" +) + +var _ = Describe("cluster.Cluster", func() { + Describe("New", func() { + It("should return an error if there is no Config", func() { + c, err := New(nil) + Expect(c).To(BeNil()) + Expect(err.Error()).To(ContainSubstring("must specify Config")) + + }) + + It("should return an error if it can't create a RestMapper", func() { + expected := fmt.Errorf("expected error: RestMapper") + c, err := New(cfg, func(o *Options) { + o.MapperProvider = func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { return nil, expected } + }) + Expect(c).To(BeNil()) + Expect(err).To(Equal(expected)) + + }) + + It("should return an error it can't create a client.Client", func() { + c, err := New(cfg, func(o *Options) { + o.NewClient = func(config *rest.Config, options client.Options) (client.Client, error) { + return nil, errors.New("expected error") + } + }) + Expect(c).To(BeNil()) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("expected error")) + }) + + It("should return an error it can't create a cache.Cache", func() { + c, err := New(cfg, func(o *Options) { + o.NewCache = func(config *rest.Config, opts cache.Options) (cache.Cache, error) { + return nil, fmt.Errorf("expected error") + } + }) + Expect(c).To(BeNil()) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("expected error")) + }) + + It("should create a client defined in by the new client function", func() { + c, err := New(cfg, func(o *Options) { + o.NewClient = func(config *rest.Config, options client.Options) (client.Client, error) { + return nil, nil + } + }) + Expect(c).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + 
Expect(c.GetClient()).To(BeNil()) + }) + + It("should return an error it can't create a recorder.Provider", func() { + c, err := New(cfg, func(o *Options) { + o.newRecorderProvider = func(_ *rest.Config, _ *http.Client, _ *runtime.Scheme, _ logr.Logger, _ intrec.EventBroadcasterProducer) (*intrec.Provider, error) { + return nil, fmt.Errorf("expected error") + } + }) + Expect(c).To(BeNil()) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("expected error")) + }) + + }) + + Describe("Start", func() { + It("should stop when context is cancelled", func(specCtx SpecContext) { + c, err := New(cfg) + Expect(err).NotTo(HaveOccurred()) + ctx, cancel := context.WithCancel(specCtx) + cancel() + Expect(c.Start(ctx)).NotTo(HaveOccurred()) + }) + }) + + It("should not leak goroutines when stopped", func(specCtx SpecContext) { + currentGRs := goleak.IgnoreCurrent() + + c, err := New(cfg) + Expect(err).NotTo(HaveOccurred()) + + ctx, cancel := context.WithCancel(specCtx) + cancel() + Expect(c.Start(ctx)).NotTo(HaveOccurred()) + + // force-close keep-alive connections. These'll time anyway (after + // like 30s or so) but force it to speed up the tests. 
+ clientTransport.CloseIdleConnections() + Eventually(func() error { return goleak.Find(currentGRs) }).Should(Succeed()) + }) + + It("should provide a function to get the Config", func() { + c, err := New(cfg) + Expect(err).NotTo(HaveOccurred()) + cluster, ok := c.(*cluster) + Expect(ok).To(BeTrue()) + Expect(c.GetConfig()).To(Equal(cluster.config)) + }) + + It("should provide a function to get the Client", func() { + c, err := New(cfg) + Expect(err).NotTo(HaveOccurred()) + cluster, ok := c.(*cluster) + Expect(ok).To(BeTrue()) + Expect(c.GetClient()).To(Equal(cluster.client)) + }) + + It("should provide a function to get the Scheme", func() { + c, err := New(cfg) + Expect(err).NotTo(HaveOccurred()) + cluster, ok := c.(*cluster) + Expect(ok).To(BeTrue()) + Expect(c.GetScheme()).To(Equal(cluster.scheme)) + }) + + It("should provide a function to get the FieldIndexer", func() { + c, err := New(cfg) + Expect(err).NotTo(HaveOccurred()) + cluster, ok := c.(*cluster) + Expect(ok).To(BeTrue()) + Expect(c.GetFieldIndexer()).To(Equal(cluster.cache)) + }) + + It("should provide a function to get the EventRecorder", func() { + c, err := New(cfg) + Expect(err).NotTo(HaveOccurred()) + Expect(c.GetEventRecorderFor("test")).NotTo(BeNil()) + }) + It("should provide a function to get the APIReader", func() { + c, err := New(cfg) + Expect(err).NotTo(HaveOccurred()) + Expect(c.GetAPIReader()).NotTo(BeNil()) + }) +}) diff --git a/pkg/cluster/internal.go b/pkg/cluster/internal.go new file mode 100644 index 0000000000..2742764231 --- /dev/null +++ b/pkg/cluster/internal.go @@ -0,0 +1,105 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "context" + "net/http" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" +) + +type cluster struct { + // config is the rest.config used to talk to the apiserver. Required. + config *rest.Config + + httpClient *http.Client + scheme *runtime.Scheme + cache cache.Cache + client client.Client + + // apiReader is the reader that will make requests to the api server and not the cache. + apiReader client.Reader + + // fieldIndexes knows how to add field indexes over the Cache used by this controller, + // which can later be consumed via field selectors from the injected client. + fieldIndexes client.FieldIndexer + + // recorderProvider is used to generate event recorders that will be injected into Controllers + // (and EventHandlers, Sources and Predicates). + recorderProvider *intrec.Provider + + // mapper is used to map resources to kind, and map kind and version. + mapper meta.RESTMapper + + // Logger is the logger that should be used by this manager. + // If none is set, it defaults to log.Log global logger. 
+ logger logr.Logger +} + +func (c *cluster) GetConfig() *rest.Config { + return c.config +} + +func (c *cluster) GetHTTPClient() *http.Client { + return c.httpClient +} + +func (c *cluster) GetClient() client.Client { + return c.client +} + +func (c *cluster) GetScheme() *runtime.Scheme { + return c.scheme +} + +func (c *cluster) GetFieldIndexer() client.FieldIndexer { + return c.fieldIndexes +} + +func (c *cluster) GetCache() cache.Cache { + return c.cache +} + +func (c *cluster) GetEventRecorderFor(name string) record.EventRecorder { + return c.recorderProvider.GetEventRecorderFor(name) +} + +func (c *cluster) GetRESTMapper() meta.RESTMapper { + return c.mapper +} + +func (c *cluster) GetAPIReader() client.Reader { + return c.apiReader +} + +func (c *cluster) GetLogger() logr.Logger { + return c.logger +} + +func (c *cluster) Start(ctx context.Context) error { + defer c.recorderProvider.Stop(ctx) + return c.cache.Start(ctx) +} diff --git a/pkg/config/controller.go b/pkg/config/controller.go new file mode 100644 index 0000000000..5eea2965f6 --- /dev/null +++ b/pkg/config/controller.go @@ -0,0 +1,92 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "time" + + "github.com/go-logr/logr" +) + +// Controller contains configuration options for controllers. It only includes options +// that makes sense for a set of controllers and is used for defaulting the options +// of multiple controllers. 
+type Controller struct { + // SkipNameValidation allows skipping the name validation that ensures that every controller name is unique. + // Unique controller names are important to get unique metrics and logs for a controller. + // Can be overwritten for a controller via the SkipNameValidation setting on the controller. + // Defaults to false if SkipNameValidation setting on controller and Manager are unset. + SkipNameValidation *bool + + // GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation + // allowed for that controller. + // + // When a controller is registered within this manager using the builder utilities, + // users have to specify the type the controller reconciles in the For(...) call. + // If the object's kind passed matches one of the keys in this map, the concurrency + // for that controller is set to the number specified. + // + // The key is expected to be consistent in form with GroupKind.String(), + // e.g. ReplicaSet in apps group (regardless of version) would be `ReplicaSet.apps`. + GroupKindConcurrency map[string]int + + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. + MaxConcurrentReconciles int + + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + // Can be overwritten for a controller via the RecoverPanic setting on the controller. + // Defaults to true if RecoverPanic setting on controller and Manager are unset. + RecoverPanic *bool + + // NeedLeaderElection indicates whether the controller needs to use leader election. + // Defaults to true, which means the controller will use leader election. + NeedLeaderElection *bool + + // EnableWarmup specifies whether the controller should start its sources when the manager is not + // the leader. 
This is useful for cases where sources take a long time to start, as it allows + // for the controller to warm up its caches even before it is elected as the leader. This + // improves leadership failover time, as the caches will be prepopulated before the controller + // transitions to be leader. + // + // Setting EnableWarmup to true and NeedLeaderElection to true means the controller will start its + // sources without waiting to become leader. + // Setting EnableWarmup to true and NeedLeaderElection to false is a no-op as controllers without + // leader election do not wait on leader election to start their sources. + // Defaults to false. + // + // Note: This feature is currently in beta and subject to change. + // For more details, see: https://github.com/kubernetes-sigs/controller-runtime/issues/3220. + EnableWarmup *bool + + // UsePriorityQueue configures the controllers queue to use the controller-runtime provided + // priority queue. + // + // Note: This flag is enabled by default. + // For more details, see: https://github.com/kubernetes-sigs/controller-runtime/issues/2374. + UsePriorityQueue *bool + + // Logger is the logger controllers should use. + Logger logr.Logger + + // ReconciliationTimeout is used as the timeout passed to the context of each Reconcile call. + // By default, there is no timeout. + ReconciliationTimeout time.Duration +} diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 3411e95f75..853788d52f 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -17,50 +17,200 @@ limitations under the License. 
package controller import ( + "context" "fmt" + "time" + "github.com/go-logr/logr" "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/handler" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + + "sigs.k8s.io/controller-runtime/pkg/config" + "sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue" "sigs.k8s.io/controller-runtime/pkg/internal/controller" "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) -// Options are the arguments for creating a new Controller -type Options struct { +// Options are the arguments for creating a new Controller. +type Options = TypedOptions[reconcile.Request] + +// TypedOptions are the arguments for creating a new Controller. +type TypedOptions[request comparable] struct { + // SkipNameValidation allows skipping the name validation that ensures that every controller name is unique. + // Unique controller names are important to get unique metrics and logs for a controller. + // Defaults to the Controller.SkipNameValidation setting from the Manager if unset. + // Defaults to false if Controller.SkipNameValidation setting from the Manager is also unset. + SkipNameValidation *bool + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. MaxConcurrentReconciles int + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + // Defaults to the Controller.RecoverPanic setting from the Manager if unset. + // Defaults to true if Controller.RecoverPanic setting from the Manager is also unset. + RecoverPanic *bool + + // NeedLeaderElection indicates whether the controller needs to use leader election. 
+ // Defaults to true, which means the controller will use leader election. + NeedLeaderElection *bool + // Reconciler reconciles an object - Reconciler reconcile.Reconciler + Reconciler reconcile.TypedReconciler[request] + + // RateLimiter is used to limit how frequently requests may be queued. + // Defaults to MaxOfRateLimiter which has both overall and per-item rate limiting. + // The overall is a token bucket and the per-item is exponential. + RateLimiter workqueue.TypedRateLimiter[request] + + // NewQueue constructs the queue for this controller once the controller is ready to start. + // With NewQueue a custom queue implementation can be used, e.g. a priority queue to prioritize with which + // priority/order objects are reconciled (e.g. to reconcile objects with changes first). + // This is a func because the standard Kubernetes work queues start themselves immediately, which + // leads to goroutine leaks if something calls controller.New repeatedly. + // The NewQueue func gets the controller name and the RateLimiter option (defaulted if necessary) passed in. + // NewQueue defaults to NewRateLimitingQueueWithConfig. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewQueue if you know what you are doing. + NewQueue func(controllerName string, rateLimiter workqueue.TypedRateLimiter[request]) workqueue.TypedRateLimitingInterface[request] + + // Logger will be used to build a default LogConstructor if unset. + Logger logr.Logger + + // LogConstructor is used to construct a logger used for this controller and passed + // to each reconciliation via the context field. + LogConstructor func(request *request) logr.Logger + + // UsePriorityQueue configures the controllers queue to use the controller-runtime provided + // priority queue. + // + // Note: This flag is enabled by default. + // For more details, see: https://github.com/kubernetes-sigs/controller-runtime/issues/2374. 
+ UsePriorityQueue *bool + + // EnableWarmup specifies whether the controller should start its sources when the manager is not + // the leader. This is useful for cases where sources take a long time to start, as it allows + // for the controller to warm up its caches even before it is elected as the leader. This + // improves leadership failover time, as the caches will be prepopulated before the controller + // transitions to be leader. + // + // Setting EnableWarmup to true and NeedLeaderElection to true means the controller will start its + // sources without waiting to become leader. + // Setting EnableWarmup to true and NeedLeaderElection to false is a no-op as controllers without + // leader election do not wait on leader election to start their sources. + // Defaults to false. + // + // Note: This feature is currently in beta and subject to change. + // For more details, see: https://github.com/kubernetes-sigs/controller-runtime/issues/3220. + EnableWarmup *bool + + // ReconciliationTimeout is used as the timeout passed to the context of each Reconcile call. + // By default, there is no timeout. 
+ ReconciliationTimeout time.Duration +} + +// DefaultFromConfig defaults the config from a config.Controller +func (options *TypedOptions[request]) DefaultFromConfig(config config.Controller) { + if options.Logger.GetSink() == nil { + options.Logger = config.Logger + } + + if options.SkipNameValidation == nil { + options.SkipNameValidation = config.SkipNameValidation + } + + if options.MaxConcurrentReconciles <= 0 && config.MaxConcurrentReconciles > 0 { + options.MaxConcurrentReconciles = config.MaxConcurrentReconciles + } + + if options.CacheSyncTimeout == 0 && config.CacheSyncTimeout > 0 { + options.CacheSyncTimeout = config.CacheSyncTimeout + } + + if options.UsePriorityQueue == nil { + options.UsePriorityQueue = config.UsePriorityQueue + } + + if options.RecoverPanic == nil { + options.RecoverPanic = config.RecoverPanic + } + + if options.NeedLeaderElection == nil { + options.NeedLeaderElection = config.NeedLeaderElection + } + + if options.EnableWarmup == nil { + options.EnableWarmup = config.EnableWarmup + } + + if options.ReconciliationTimeout == 0 { + options.ReconciliationTimeout = config.ReconciliationTimeout + } } -// Controller implements a Kubernetes API. A Controller manages a work queue fed reconcile.Requests -// from source.Sources. Work is performed through the reconcile.Reconciler for each enqueued item. +// Controller implements an API. A Controller manages a work queue fed reconcile.Requests +// from source.Sources. Work is performed through the reconcile.Reconciler for each enqueued item. // Work typically is reads and writes Kubernetes objects to make the system state match the state specified // in the object Spec. -type Controller interface { +type Controller = TypedController[reconcile.Request] + +// TypedController implements an API. 
+type TypedController[request comparable] interface { // Reconciler is called to reconcile an object by Namespace/Name - reconcile.Reconciler + reconcile.TypedReconciler[request] - // Watch takes events provided by a Source and uses the EventHandler to - // enqueue reconcile.Requests in response to the events. - // - // Watch may be provided one or more Predicates to filter events before - // they are given to the EventHandler. Events will be passed to the - // EventHandler if all provided Predicates evaluate to true. - Watch(src source.Source, eventhandler handler.EventHandler, predicates ...predicate.Predicate) error + // Watch watches the provided Source. + Watch(src source.TypedSource[request]) error - // Start starts the controller. Start blocks until stop is closed or a + // Start starts the controller. Start blocks until the context is closed or a // controller has an error starting. - Start(stop <-chan struct{}) error + Start(ctx context.Context) error + + // GetLogger returns this controller logger prefilled with basic information. + GetLogger() logr.Logger } // New returns a new Controller registered with the Manager. The Manager will ensure that shared Caches have // been synced before the Controller is Started. +// +// The name must be unique as it is used to identify the controller in metrics and logs. func New(name string, mgr manager.Manager, options Options) (Controller, error) { + return NewTyped(name, mgr, options) +} + +// NewTyped returns a new typed controller registered with the Manager, +// +// The name must be unique as it is used to identify the controller in metrics and logs. 
+func NewTyped[request comparable](name string, mgr manager.Manager, options TypedOptions[request]) (TypedController[request], error) { + options.DefaultFromConfig(mgr.GetControllerOptions()) + c, err := NewTypedUnmanaged(name, options) + if err != nil { + return nil, err + } + + // Add the controller as a Manager components + return c, mgr.Add(c) +} + +// NewUnmanaged returns a new controller without adding it to the manager. The +// caller is responsible for starting the returned controller. +// +// The name must be unique as it is used to identify the controller in metrics and logs. +func NewUnmanaged(name string, options Options) (Controller, error) { + return NewTypedUnmanaged(name, options) +} + +// NewTypedUnmanaged returns a new typed controller without adding it to the manager. +// +// The name must be unique as it is used to identify the controller in metrics and logs. +func NewTypedUnmanaged[request comparable](name string, options TypedOptions[request]) (TypedController[request], error) { if options.Reconciler == nil { return nil, fmt.Errorf("must specify Reconciler") } @@ -69,28 +219,73 @@ func New(name string, mgr manager.Manager, options Options) (Controller, error) return nil, fmt.Errorf("must specify Name for Controller") } + if options.SkipNameValidation == nil || !*options.SkipNameValidation { + if err := checkName(name); err != nil { + return nil, err + } + } + + if options.LogConstructor == nil { + log := options.Logger.WithValues( + "controller", name, + ) + options.LogConstructor = func(in *request) logr.Logger { + log := log + if req, ok := any(in).(*reconcile.Request); ok && req != nil { + log = log.WithValues( + "object", klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + ) + } + return log + } + } + if options.MaxConcurrentReconciles <= 0 { options.MaxConcurrentReconciles = 1 } - // Inject dependencies into Reconciler - if err := mgr.SetFields(options.Reconciler); err != nil { - return nil, err + if 
options.CacheSyncTimeout == 0 { + options.CacheSyncTimeout = 2 * time.Minute + } + + if options.RateLimiter == nil { + if ptr.Deref(options.UsePriorityQueue, true) { + options.RateLimiter = workqueue.NewTypedItemExponentialFailureRateLimiter[request](5*time.Millisecond, 1000*time.Second) + } else { + options.RateLimiter = workqueue.DefaultTypedControllerRateLimiter[request]() + } + } + + if options.NewQueue == nil { + options.NewQueue = func(controllerName string, rateLimiter workqueue.TypedRateLimiter[request]) workqueue.TypedRateLimitingInterface[request] { + if ptr.Deref(options.UsePriorityQueue, true) { + return priorityqueue.New(controllerName, func(o *priorityqueue.Opts[request]) { + o.Log = options.Logger.WithValues("controller", controllerName) + o.RateLimiter = rateLimiter + }) + } + return workqueue.NewTypedRateLimitingQueueWithConfig(rateLimiter, workqueue.TypedRateLimitingQueueConfig[request]{ + Name: controllerName, + }) + } } // Create controller with dependencies set - c := &controller.Controller{ + return controller.New[request](controller.Options[request]{ Do: options.Reconciler, - Cache: mgr.GetCache(), - Config: mgr.GetConfig(), - Scheme: mgr.GetScheme(), - Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor(name), - Queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), + RateLimiter: options.RateLimiter, + NewQueue: options.NewQueue, MaxConcurrentReconciles: options.MaxConcurrentReconciles, + CacheSyncTimeout: options.CacheSyncTimeout, Name: name, - } - - // Add the controller as a Manager components - return c, mgr.Add(c) + LogConstructor: options.LogConstructor, + RecoverPanic: options.RecoverPanic, + LeaderElected: options.NeedLeaderElection, + EnableWarmup: options.EnableWarmup, + ReconciliationTimeout: options.ReconciliationTimeout, + }), nil } + +// ReconcileIDFromContext gets the reconcileID from the current context. 
+var ReconcileIDFromContext = controller.ReconcileIDFromContext diff --git a/pkg/controller/controller_integration_test.go b/pkg/controller/controller_integration_test.go index df2d9c1f90..e09813eee2 100644 --- a/pkg/controller/controller_integration_test.go +++ b/pkg/controller/controller_integration_test.go @@ -17,66 +17,94 @@ limitations under the License. package controller_test import ( + "context" + "fmt" + "strconv" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "sigs.k8s.io/controller-runtime/pkg/manager" ) var _ = Describe("controller", func() { var reconciled chan reconcile.Request - var stop chan struct{} BeforeEach(func() { - stop = make(chan struct{}) reconciled = make(chan reconcile.Request) Expect(cfg).NotTo(BeNil()) }) - AfterEach(func() { - close(stop) - }) - Describe("controller", func() { // TODO(directxman12): write a whole suite of controller-client interaction tests - It("should reconcile", func(done Done) { + // The watches in this test are setup with a namespace predicate to avoid each table entry + // from interfering with the others. We cannot add a delete call for the pods created in the + // test, as it causes flakes with the api-server termination timing out. 
+ // See https://github.com/kubernetes-sigs/controller-runtime/issues/1571 for a description + // of the issue, and a discussion here: https://github.com/kubernetes-sigs/controller-runtime/pull/3192#discussion_r2186967799 + DescribeTable("should reconcile", func(ctx SpecContext, enableWarmup bool) { By("Creating the Manager") cm, err := manager.New(cfg, manager.Options{}) Expect(err).NotTo(HaveOccurred()) By("Creating the Controller") - instance, err := controller.New("foo-controller", cm, controller.Options{ - Reconciler: reconcile.Func( - func(request reconcile.Request) (reconcile.Result, error) { - reconciled <- request - return reconcile.Result{}, nil - }), - }) + instance, err := controller.New( + fmt.Sprintf("foo-controller-%t", enableWarmup), + cm, + controller.Options{ + Reconciler: reconcile.Func( + func(_ context.Context, request reconcile.Request) (reconcile.Result, error) { + reconciled <- request + return reconcile.Result{}, nil + }), + EnableWarmup: ptr.To(enableWarmup), + }, + ) Expect(err).NotTo(HaveOccurred()) + testNamespace := strconv.FormatBool(enableWarmup) + By("Watching Resources") - err = instance.Watch(&source.Kind{Type: &appsv1.ReplicaSet{}}, &handler.EnqueueRequestForOwner{ - OwnerType: &appsv1.Deployment{}, - }) + err = instance.Watch( + source.Kind(cm.GetCache(), &appsv1.ReplicaSet{}, + handler.TypedEnqueueRequestForOwner[*appsv1.ReplicaSet](cm.GetScheme(), cm.GetRESTMapper(), &appsv1.Deployment{}), + makeNamespacePredicate[*appsv1.ReplicaSet](testNamespace), + ), + ) Expect(err).NotTo(HaveOccurred()) - err = instance.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForObject{}) + err = instance.Watch( + source.Kind(cm.GetCache(), &appsv1.Deployment{}, + &handler.TypedEnqueueRequestForObject[*appsv1.Deployment]{}, + makeNamespacePredicate[*appsv1.Deployment](testNamespace), + ), + ) Expect(err).NotTo(HaveOccurred()) + err = cm.GetClient().Get(ctx, types.NamespacedName{Name: "foo"}, 
&corev1.Namespace{}) + Expect(err).To(Equal(&cache.ErrCacheNotStarted{})) + err = cm.GetClient().List(ctx, &corev1.NamespaceList{}) + Expect(err).To(Equal(&cache.ErrCacheNotStarted{})) + By("Starting the Manager") go func() { defer GinkgoRecover() - Expect(cm.Start(stop)).NotTo(HaveOccurred()) + Expect(cm.Start(ctx)).NotTo(HaveOccurred()) }() deployment := &appsv1.Deployment{ @@ -92,6 +120,9 @@ var _ = Describe("controller", func() { { Name: "nginx", Image: "nginx", + SecurityContext: &corev1.SecurityContext{ + Privileged: truePtr(), + }, }, }, }, @@ -99,19 +130,25 @@ var _ = Describe("controller", func() { }, } expectedReconcileRequest := reconcile.Request{NamespacedName: types.NamespacedName{ - Namespace: "default", + Namespace: testNamespace, Name: "deployment-name", }} + By("Creating the test namespace") + _, err = clientset.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: testNamespace}, + }, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + By("Invoking Reconciling for Create") - deployment, err = clientset.AppsV1().Deployments("default").Create(deployment) + deployment, err = clientset.AppsV1().Deployments(testNamespace).Create(ctx, deployment, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(<-reconciled).To(Equal(expectedReconcileRequest)) By("Invoking Reconciling for Update") newDeployment := deployment.DeepCopy() newDeployment.Labels = map[string]string{"foo": "bar"} - newDeployment, err = clientset.AppsV1().Deployments("default").Update(newDeployment) + _, err = clientset.AppsV1().Deployments(testNamespace).Update(ctx, newDeployment, metav1.UpdateOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(<-reconciled).To(Equal(expectedReconcileRequest)) @@ -134,29 +171,82 @@ var _ = Describe("controller", func() { Template: deployment.Spec.Template, }, } - replicaset, err = clientset.AppsV1().ReplicaSets("default").Create(replicaset) + replicaset, err = 
clientset.AppsV1().ReplicaSets(testNamespace).Create(ctx, replicaset, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(<-reconciled).To(Equal(expectedReconcileRequest)) By("Invoking Reconciling for an OwnedObject when it is updated") newReplicaset := replicaset.DeepCopy() newReplicaset.Labels = map[string]string{"foo": "bar"} - newReplicaset, err = clientset.AppsV1().ReplicaSets("default").Update(newReplicaset) + _, err = clientset.AppsV1().ReplicaSets(testNamespace).Update(ctx, newReplicaset, metav1.UpdateOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(<-reconciled).To(Equal(expectedReconcileRequest)) By("Invoking Reconciling for an OwnedObject when it is deleted") - err = clientset.AppsV1().ReplicaSets("default").Delete(replicaset.Name, &metav1.DeleteOptions{}) + err = clientset.AppsV1().ReplicaSets(testNamespace).Delete(ctx, replicaset.Name, metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(<-reconciled).To(Equal(expectedReconcileRequest)) By("Invoking Reconciling for Delete") - err = clientset.AppsV1().Deployments("default"). - Delete("deployment-name", &metav1.DeleteOptions{}) + err = clientset.AppsV1().Deployments(testNamespace). + Delete(ctx, "deployment-name", metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(<-reconciled).To(Equal(expectedReconcileRequest)) - close(done) - }, 5) + By("Listing a type with a slice of pointers as items field") + err = cm.GetClient(). 
+ List(ctx, &controllertest.UnconventionalListTypeList{}) + Expect(err).NotTo(HaveOccurred()) + + By("Invoking Reconciling for a pod when it is created when adding watcher dynamically") + // Add new watcher dynamically + err = instance.Watch( + source.Kind(cm.GetCache(), &corev1.Pod{}, + &handler.TypedEnqueueRequestForObject[*corev1.Pod]{}, + makeNamespacePredicate[*corev1.Pod](testNamespace), + ), + ) + Expect(err).NotTo(HaveOccurred()) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "pod-name"}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx:latest", + Ports: []corev1.ContainerPort{ + { + ContainerPort: 80, + }, + }, + }, + }, + }, + } + expectedReconcileRequest = reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: testNamespace, + Name: "pod-name", + }} + _, err = clientset.CoreV1().Pods(testNamespace).Create(ctx, pod, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(<-reconciled).To(Equal(expectedReconcileRequest)) + }, + Entry("with controller warmup enabled", true), + Entry("with controller warmup not enabled", false), + ) }) }) + +// makeNamespacePredicate returns a predicate that filters out all objects not in the passed in +// namespace. +func makeNamespacePredicate[object client.Object](namespace string) predicate.TypedPredicate[object] { + return predicate.NewTypedPredicateFuncs[object](func(obj object) bool { + return obj.GetNamespace() == namespace + }) +} + +func truePtr() *bool { + t := true + return &t +} diff --git a/pkg/controller/controller_suite_test.go b/pkg/controller/controller_suite_test.go index bb76e87722..57e5471d03 100644 --- a/pkg/controller/controller_suite_test.go +++ b/pkg/controller/controller_suite_test.go @@ -17,48 +17,74 @@ limitations under the License. package controller_test import ( + "net/http" "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/controller-runtime/pkg/metrics" + crscheme "sigs.k8s.io/controller-runtime/pkg/scheme" ) func TestSource(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Controller Integration Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Controller Integration Suite") } var testenv *envtest.Environment var cfg *rest.Config var clientset *kubernetes.Clientset -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) +// clientTransport is used to force-close keep-alives in tests that check for leaks. +var clientTransport *http.Transport + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + err := (&crscheme.Builder{ + GroupVersion: schema.GroupVersion{Group: "chaosapps.metamagical.io", Version: "v1"}, + }). + Register( + &controllertest.UnconventionalListType{}, + &controllertest.UnconventionalListTypeList{}, + ).AddToScheme(scheme.Scheme) + Expect(err).ToNot(HaveOccurred()) - testenv = &envtest.Environment{} + testenv = &envtest.Environment{ + CRDDirectoryPaths: []string{"testdata/crds"}, + } - var err error cfg, err = testenv.Start() Expect(err).NotTo(HaveOccurred()) + cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + // NB(directxman12): we can't set Transport *and* use TLS options, + // so we grab the transport right after it gets created so that we can + // type-assert on it (hopefully)? 
+ // hopefully this doesn't break 🤞 + clientTransport = rt.(*http.Transport) + return rt + } + clientset, err = kubernetes.NewForConfig(cfg) Expect(err).NotTo(HaveOccurred()) // Prevent the metrics listener being created - metrics.DefaultBindAddress = "0" - - close(done) -}, 60) + metricsserver.DefaultBindAddress = "0" +}) var _ = AfterSuite(func() { Expect(testenv.Stop()).To(Succeed()) // Put the DefaultBindAddress back - metrics.DefaultBindAddress = ":8080" + metricsserver.DefaultBindAddress = ":8080" }) diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index e4ccd7970b..06138a476b 100644 --- a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -17,66 +17,113 @@ limitations under the License. package controller_test import ( - "fmt" + "context" + "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/client" + "go.uber.org/goleak" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" + + "sigs.k8s.io/controller-runtime/pkg/config" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + internalcontroller "sigs.k8s.io/controller-runtime/pkg/internal/controller" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/source" ) var _ = Describe("controller.Controller", func() { - var stop chan struct{} - - rec := reconcile.Func(func(reconcile.Request) (reconcile.Result, error) { + rec := reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { return reconcile.Result{}, nil }) - BeforeEach(func() { - stop = make(chan struct{}) - }) - - AfterEach(func() { - 
close(stop) - }) Describe("New", func() { - It("should return an error if Name is not Specified", func(done Done) { + It("should return an error if Name is not Specified", func() { m, err := manager.New(cfg, manager.Options{}) Expect(err).NotTo(HaveOccurred()) c, err := controller.New("", m, controller.Options{Reconciler: rec}) Expect(c).To(BeNil()) Expect(err.Error()).To(ContainSubstring("must specify Name for Controller")) - - close(done) }) - It("should return an error if Reconciler is not Specified", func(done Done) { + It("should return an error if Reconciler is not Specified", func() { m, err := manager.New(cfg, manager.Options{}) Expect(err).NotTo(HaveOccurred()) c, err := controller.New("foo", m, controller.Options{}) Expect(c).To(BeNil()) Expect(err.Error()).To(ContainSubstring("must specify Reconciler")) - - close(done) }) - It("NewController should return an error if injecting Reconciler fails", func(done Done) { + It("should return an error if two controllers are registered with the same name", func() { m, err := manager.New(cfg, manager.Options{}) Expect(err).NotTo(HaveOccurred()) - c, err := controller.New("foo", m, controller.Options{Reconciler: &failRec{}}) - Expect(c).To(BeNil()) + c1, err := controller.New("c3", m, controller.Options{Reconciler: rec}) + Expect(err).NotTo(HaveOccurred()) + Expect(c1).ToNot(BeNil()) + + c2, err := controller.New("c3", m, controller.Options{Reconciler: rec}) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("controller with name c3 already exists")) + Expect(c2).To(BeNil()) + }) + + It("should return an error if two controllers are registered with the same name and SkipNameValidation is set to false on the manager", func() { + m, err := manager.New(cfg, manager.Options{ + Controller: config.Controller{ + SkipNameValidation: ptr.To(false), + }, + }) + Expect(err).NotTo(HaveOccurred()) + + c1, err := controller.New("c4", m, controller.Options{Reconciler: rec}) + 
Expect(err).NotTo(HaveOccurred()) + Expect(c1).ToNot(BeNil()) + + c2, err := controller.New("c4", m, controller.Options{Reconciler: rec}) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("expected error")) + Expect(err.Error()).To(ContainSubstring("controller with name c4 already exists")) + Expect(c2).To(BeNil()) + }) + + It("should not return an error if two controllers are registered with the same name and SkipNameValidation is set on the manager", func() { + m, err := manager.New(cfg, manager.Options{ + Controller: config.Controller{ + SkipNameValidation: ptr.To(true), + }, + }) + Expect(err).NotTo(HaveOccurred()) + + c1, err := controller.New("c5", m, controller.Options{Reconciler: rec}) + Expect(err).NotTo(HaveOccurred()) + Expect(c1).ToNot(BeNil()) - close(done) + c2, err := controller.New("c5", m, controller.Options{Reconciler: rec}) + Expect(err).NotTo(HaveOccurred()) + Expect(c2).ToNot(BeNil()) }) - It("should not return an error if two controllers are registered with different names", func(done Done) { + It("should not return an error if two controllers are registered with the same name and SkipNameValidation is set on the controller", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + c1, err := controller.New("c6", m, controller.Options{Reconciler: rec}) + Expect(err).NotTo(HaveOccurred()) + Expect(c1).ToNot(BeNil()) + + c2, err := controller.New("c6", m, controller.Options{Reconciler: rec, SkipNameValidation: ptr.To(true)}) + Expect(err).NotTo(HaveOccurred()) + Expect(c2).ToNot(BeNil()) + }) + + It("should not return an error if two controllers are registered with different names", func() { m, err := manager.New(cfg, manager.Options{}) Expect(err).NotTo(HaveOccurred()) @@ -87,21 +134,447 @@ var _ = Describe("controller.Controller", func() { c2, err := controller.New("c2", m, controller.Options{Reconciler: rec}) Expect(err).NotTo(HaveOccurred()) Expect(c2).ToNot(BeNil()) + }) + + 
It("should not leak goroutines when stopped", func(specCtx SpecContext) { + currentGRs := goleak.IgnoreCurrent() - close(done) + ctx, cancel := context.WithCancel(specCtx) + watchChan := make(chan event.GenericEvent, 1) + watch := source.Channel(watchChan, &handler.EnqueueRequestForObject{}) + watchChan <- event.GenericEvent{Object: &corev1.Pod{}} + + reconcileStarted := make(chan struct{}) + controllerFinished := make(chan struct{}) + rec := reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + defer GinkgoRecover() + close(reconcileStarted) + // Make sure reconciliation takes a moment and is not quicker than the controllers + // shutdown. + time.Sleep(50 * time.Millisecond) + // Explicitly test this on top of the leakdetection, as the latter uses Eventually + // so might succeed even when the controller does not wait for all reconciliations + // to finish. + Expect(controllerFinished).NotTo(BeClosed()) + return reconcile.Result{}, nil + }) + + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-0", m, controller.Options{Reconciler: rec}) + Expect(c.Watch(watch)).To(Succeed()) + Expect(err).NotTo(HaveOccurred()) + + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).To(Succeed()) + close(controllerFinished) + }() + + <-reconcileStarted + cancel() + <-controllerFinished + + // force-close keep-alive connections. These'll time anyway (after + // like 30s or so) but force it to speed up the tests. 
+ clientTransport.CloseIdleConnections() + Eventually(func() error { return goleak.Find(currentGRs) }).Should(Succeed()) + }) + + It("should not create goroutines if never started", func() { + currentGRs := goleak.IgnoreCurrent() + + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + _, err = controller.New("new-controller-1", m, controller.Options{Reconciler: rec}) + Expect(err).NotTo(HaveOccurred()) + + // force-close keep-alive connections. These'll time anyway (after + // like 30s or so) but force it to speed up the tests. + clientTransport.CloseIdleConnections() + Eventually(func() error { return goleak.Find(currentGRs) }).Should(Succeed()) + }) + + It("should default RateLimiter and NewQueue if not specified", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-2", m, controller.Options{ + Reconciler: reconcile.Func(nil), + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.RateLimiter).NotTo(BeNil()) + Expect(ctrl.NewQueue).NotTo(BeNil()) + }) + + It("should not override RateLimiter and NewQueue if specified", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + customRateLimiter := workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](5*time.Millisecond, 1000*time.Second) + customNewQueueCalled := false + customNewQueue := func(controllerName string, rateLimiter workqueue.TypedRateLimiter[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] { + customNewQueueCalled = true + return nil + } + + c, err := controller.New("new-controller-3", m, controller.Options{ + Reconciler: reconcile.Func(nil), + RateLimiter: customRateLimiter, + NewQueue: customNewQueue, + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := 
c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.RateLimiter).To(BeIdenticalTo(customRateLimiter)) + ctrl.NewQueue("controller1", nil) + Expect(customNewQueueCalled).To(BeTrue(), "Expected customNewQueue to be called") + }) + + It("should default RecoverPanic from the manager", func() { + m, err := manager.New(cfg, manager.Options{Controller: config.Controller{RecoverPanic: ptr.To(true)}}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-4", m, controller.Options{ + Reconciler: reconcile.Func(nil), + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.RecoverPanic).NotTo(BeNil()) + Expect(*ctrl.RecoverPanic).To(BeTrue()) + }) + + It("should not override RecoverPanic on the controller", func() { + m, err := manager.New(cfg, manager.Options{Controller: config.Controller{RecoverPanic: ptr.To(true)}}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller", m, controller.Options{ + RecoverPanic: ptr.To(false), + Reconciler: reconcile.Func(nil), + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.RecoverPanic).NotTo(BeNil()) + Expect(*ctrl.RecoverPanic).To(BeFalse()) + }) + + It("should default NeedLeaderElection from the manager", func() { + m, err := manager.New(cfg, manager.Options{Controller: config.Controller{NeedLeaderElection: ptr.To(true)}}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-5", m, controller.Options{ + Reconciler: reconcile.Func(nil), + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.NeedLeaderElection()).To(BeTrue()) + }) + + It("should not override NeedLeaderElection on the controller", func() { + m, err := 
manager.New(cfg, manager.Options{Controller: config.Controller{NeedLeaderElection: ptr.To(true)}}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-6", m, controller.Options{ + NeedLeaderElection: ptr.To(false), + Reconciler: reconcile.Func(nil), + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.NeedLeaderElection()).To(BeFalse()) + }) + + It("Should default MaxConcurrentReconciles from the manager if set", func() { + m, err := manager.New(cfg, manager.Options{Controller: config.Controller{MaxConcurrentReconciles: 5}}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-7", m, controller.Options{ + Reconciler: reconcile.Func(nil), + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.MaxConcurrentReconciles).To(BeEquivalentTo(5)) }) - }) -}) -var _ reconcile.Reconciler = &failRec{} -var _ inject.Client = &failRec{} + It("Should default MaxConcurrentReconciles to 1 if unset", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-8", m, controller.Options{ + Reconciler: reconcile.Func(nil), + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.MaxConcurrentReconciles).To(BeEquivalentTo(1)) + }) + + It("Should leave MaxConcurrentReconciles if set", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-9", m, controller.Options{ + Reconciler: reconcile.Func(nil), + MaxConcurrentReconciles: 5, + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + 
Expect(ctrl.MaxConcurrentReconciles).To(BeEquivalentTo(5)) + }) + + It("Should default CacheSyncTimeout from the manager if set", func() { + m, err := manager.New(cfg, manager.Options{Controller: config.Controller{CacheSyncTimeout: 5}}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-10", m, controller.Options{ + Reconciler: reconcile.Func(nil), + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.CacheSyncTimeout).To(BeEquivalentTo(5)) + }) + + It("Should default CacheSyncTimeout to 2 minutes if unset", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-11", m, controller.Options{ + Reconciler: reconcile.Func(nil), + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.CacheSyncTimeout).To(BeEquivalentTo(2 * time.Minute)) + }) + + It("Should leave CacheSyncTimeout if set", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-12", m, controller.Options{ + Reconciler: reconcile.Func(nil), + CacheSyncTimeout: 5, + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.CacheSyncTimeout).To(BeEquivalentTo(5)) + }) + + It("should default NeedLeaderElection on the controller to true", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-13", m, controller.Options{ + Reconciler: rec, + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.NeedLeaderElection()).To(BeTrue()) + }) + + It("should allow for 
setting leaderElected to false", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-14", m, controller.Options{ + NeedLeaderElection: ptr.To(false), + Reconciler: rec, + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.NeedLeaderElection()).To(BeFalse()) + }) + + It("should implement manager.LeaderElectionRunnable", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-15", m, controller.Options{ + Reconciler: rec, + }) + Expect(err).NotTo(HaveOccurred()) + + _, ok := c.(manager.LeaderElectionRunnable) + Expect(ok).To(BeTrue()) + }) + + It("should configure a priority queue per default", func() { + m, err := manager.New(cfg, manager.Options{ + Controller: config.Controller{}, + }) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-16", m, controller.Options{ + Reconciler: rec, + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + q := ctrl.NewQueue("foo", nil) + _, ok = q.(priorityqueue.PriorityQueue[reconcile.Request]) + Expect(ok).To(BeTrue()) + }) + + It("should not configure a priority queue if UsePriorityQueue is set to false", func() { + m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("new-controller-17", m, controller.Options{ + Reconciler: rec, + UsePriorityQueue: ptr.To(false), + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + q := ctrl.NewQueue("foo", nil) + _, ok = q.(priorityqueue.PriorityQueue[reconcile.Request]) + Expect(ok).To(BeFalse()) + }) -type failRec struct{} + It("should set EnableWarmup correctly", func() { + 
m, err := manager.New(cfg, manager.Options{}) + Expect(err).NotTo(HaveOccurred()) + + // Test with EnableWarmup set to true + ctrlWithWarmup, err := controller.New("warmup-enabled-ctrl", m, controller.Options{ + Reconciler: reconcile.Func(nil), + EnableWarmup: ptr.To(true), + }) + Expect(err).NotTo(HaveOccurred()) + + internalCtrlWithWarmup, ok := ctrlWithWarmup.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + Expect(internalCtrlWithWarmup.EnableWarmup).To(HaveValue(BeTrue())) + + // Test with EnableWarmup set to false + ctrlWithoutWarmup, err := controller.New("warmup-disabled-ctrl", m, controller.Options{ + Reconciler: reconcile.Func(nil), + EnableWarmup: ptr.To(false), + }) + Expect(err).NotTo(HaveOccurred()) -func (*failRec) Reconcile(reconcile.Request) (reconcile.Result, error) { - return reconcile.Result{}, nil -} + internalCtrlWithoutWarmup, ok := ctrlWithoutWarmup.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + Expect(internalCtrlWithoutWarmup.EnableWarmup).To(HaveValue(BeFalse())) -func (*failRec) InjectClient(client.Client) error { - return fmt.Errorf("expected error") -} + // Test with EnableWarmup not set (should default to nil) + ctrlWithDefaultWarmup, err := controller.New("warmup-default-ctrl", m, controller.Options{ + Reconciler: reconcile.Func(nil), + }) + Expect(err).NotTo(HaveOccurred()) + + internalCtrlWithDefaultWarmup, ok := ctrlWithDefaultWarmup.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + Expect(internalCtrlWithDefaultWarmup.EnableWarmup).To(BeNil()) + }) + + It("should inherit EnableWarmup from manager config", func() { + // Test with manager default setting EnableWarmup to true + managerWithWarmup, err := manager.New(cfg, manager.Options{ + Controller: config.Controller{ + EnableWarmup: ptr.To(true), + }, + }) + Expect(err).NotTo(HaveOccurred()) + ctrlInheritingWarmup, err := controller.New("inherit-warmup-enabled", managerWithWarmup, 
controller.Options{ + Reconciler: reconcile.Func(nil), + }) + Expect(err).NotTo(HaveOccurred()) + + internalCtrlInheritingWarmup, ok := ctrlInheritingWarmup.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + Expect(internalCtrlInheritingWarmup.EnableWarmup).To(HaveValue(BeTrue())) + + // Test that explicit controller setting overrides manager setting + ctrlOverridingWarmup, err := controller.New("override-warmup-disabled", managerWithWarmup, controller.Options{ + Reconciler: reconcile.Func(nil), + EnableWarmup: ptr.To(false), + }) + Expect(err).NotTo(HaveOccurred()) + + internalCtrlOverridingWarmup, ok := ctrlOverridingWarmup.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + Expect(internalCtrlOverridingWarmup.EnableWarmup).To(HaveValue(BeFalse())) + }) + + It("should default ReconciliationTimeout from manager if unset", func() { + m, err := manager.New(cfg, manager.Options{ + Controller: config.Controller{ReconciliationTimeout: 30 * time.Second}, + }) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("mgr-reconciliation-timeout", m, controller.Options{ + Reconciler: rec, + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.ReconciliationTimeout).To(Equal(30 * time.Second)) + }) + + It("should not override an existing ReconciliationTimeout", func() { + m, err := manager.New(cfg, manager.Options{ + Controller: config.Controller{ReconciliationTimeout: 30 * time.Second}, + }) + Expect(err).NotTo(HaveOccurred()) + + c, err := controller.New("ctrl-reconciliation-timeout", m, controller.Options{ + Reconciler: rec, + ReconciliationTimeout: time.Minute, + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl, ok := c.(*internalcontroller.Controller[reconcile.Request]) + Expect(ok).To(BeTrue()) + + Expect(ctrl.ReconciliationTimeout).To(Equal(time.Minute)) + }) + }) +}) diff --git 
a/pkg/controller/controllertest/testing.go b/pkg/controller/controllertest/testing.go index 570a97e36d..2b481d2116 100644 --- a/pkg/controller/controllertest/testing.go +++ b/pkg/controller/controllertest/testing.go @@ -17,11 +17,13 @@ limitations under the License. package controllertest import ( + "sync" "time" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) var _ runtime.Object = &ErrorType{} @@ -29,34 +31,43 @@ var _ runtime.Object = &ErrorType{} // ErrorType implements runtime.Object but isn't registered in any scheme and should cause errors in tests as a result. type ErrorType struct{} -// GetObjectKind implements runtime.Object +// GetObjectKind implements runtime.Object. func (ErrorType) GetObjectKind() schema.ObjectKind { return nil } -// DeepCopyObject implements runtime.Object +// DeepCopyObject implements runtime.Object. func (ErrorType) DeepCopyObject() runtime.Object { return nil } -var _ workqueue.RateLimitingInterface = Queue{} +var _ workqueue.TypedRateLimitingInterface[reconcile.Request] = &Queue{} // Queue implements a RateLimiting queue as a non-ratelimited queue for testing. // This helps testing by having functions that use a RateLimiting queue synchronously add items to the queue. -type Queue struct { - workqueue.Interface +type Queue = TypedQueue[reconcile.Request] + +// TypedQueue implements a RateLimiting queue as a non-ratelimited queue for testing. +// This helps testing by having functions that use a RateLimiting queue synchronously add items to the queue. +type TypedQueue[request comparable] struct { + workqueue.TypedInterface[request] + AddedRateLimitedLock sync.Mutex + AddedRatelimited []any } // AddAfter implements RateLimitingInterface. 
-func (q Queue) AddAfter(item interface{}, duration time.Duration) { +func (q *TypedQueue[request]) AddAfter(item request, duration time.Duration) { q.Add(item) } // AddRateLimited implements RateLimitingInterface. TODO(community): Implement this. -func (q Queue) AddRateLimited(item interface{}) { +func (q *TypedQueue[request]) AddRateLimited(item request) { + q.AddedRateLimitedLock.Lock() + q.AddedRatelimited = append(q.AddedRatelimited, item) + q.AddedRateLimitedLock.Unlock() q.Add(item) } // Forget implements RateLimitingInterface. TODO(community): Implement this. -func (q Queue) Forget(item interface{}) {} +func (q *TypedQueue[request]) Forget(item request) {} // NumRequeues implements RateLimitingInterface. TODO(community): Implement this. -func (q Queue) NumRequeues(item interface{}) int { +func (q *TypedQueue[request]) NumRequeues(item request) int { return 0 } diff --git a/pkg/controller/controllertest/unconventionallisttypecrd.go b/pkg/controller/controllertest/unconventionallisttypecrd.go new file mode 100644 index 0000000000..d0f5017154 --- /dev/null +++ b/pkg/controller/controllertest/unconventionallisttypecrd.go @@ -0,0 +1,76 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllertest + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ runtime.Object = &UnconventionalListType{} +var _ runtime.Object = &UnconventionalListTypeList{} + +// UnconventionalListType is used to test CRDs with List types that +// have a slice of pointers rather than a slice of literals. +type UnconventionalListType struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec string `json:"spec,omitempty"` +} + +// DeepCopyObject implements runtime.Object +// Handwritten for simplicity. +func (u *UnconventionalListType) DeepCopyObject() runtime.Object { + return u.DeepCopy() +} + +// DeepCopy implements *UnconventionalListType +// Handwritten for simplicity. +func (u *UnconventionalListType) DeepCopy() *UnconventionalListType { + return &UnconventionalListType{ + TypeMeta: u.TypeMeta, + ObjectMeta: *u.ObjectMeta.DeepCopy(), + Spec: u.Spec, + } +} + +// UnconventionalListTypeList is used to test CRDs with List types that +// have a slice of pointers rather than a slice of literals. +type UnconventionalListTypeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []*UnconventionalListType `json:"items"` +} + +// DeepCopyObject implements runtime.Object +// Handwritten for simplicity. +func (u *UnconventionalListTypeList) DeepCopyObject() runtime.Object { + return u.DeepCopy() +} + +// DeepCopy implements *UnconventionalListTypeList +// Handwritten for simplicity. 
+func (u *UnconventionalListTypeList) DeepCopy() *UnconventionalListTypeList { + out := &UnconventionalListTypeList{ + TypeMeta: u.TypeMeta, + ListMeta: *u.ListMeta.DeepCopy(), + } + for _, item := range u.Items { + out.Items = append(out.Items, item.DeepCopy()) + } + return out +} diff --git a/pkg/controller/controllertest/util.go b/pkg/controller/controllertest/util.go index 5557c80cfa..2c9a248899 100644 --- a/pkg/controller/controllertest/util.go +++ b/pkg/controller/controllertest/util.go @@ -17,6 +17,7 @@ limitations under the License. package controllertest import ( + "context" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,7 +26,7 @@ import ( var _ cache.SharedIndexInformer = &FakeInformer{} -// FakeInformer provides fake Informer functionality for testing +// FakeInformer provides fake Informer functionality for testing. type FakeInformer struct { // Synced is returned by the HasSynced functions to implement the Informer interface Synced bool @@ -51,45 +52,62 @@ func (f *FakeInformer) Informer() cache.SharedIndexInformer { return f } -// HasSynced implements the Informer interface. Returns f.Synced +// HasSynced implements the Informer interface. Returns f.Synced. func (f *FakeInformer) HasSynced() bool { return f.Synced } -// AddEventHandler implements the Informer interface. Adds an EventHandler to the fake Informers. -func (f *FakeInformer) AddEventHandler(handler cache.ResourceEventHandler) { +// AddEventHandler implements the Informer interface. Adds an EventHandler to the fake Informers. TODO(community): Implement Registration. +func (f *FakeInformer) AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) { f.handlers = append(f.handlers, handler) + return nil, nil } -// Run implements the Informer interface. Increments f.RunCount +// AddEventHandlerWithResyncPeriod implements the Informer interface. Adds an EventHandler to the fake Informers (ignores resyncPeriod). 
TODO(community): Implement Registration. +func (f *FakeInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, _ time.Duration) (cache.ResourceEventHandlerRegistration, error) { + f.handlers = append(f.handlers, handler) + return nil, nil +} + +// AddEventHandlerWithOptions implements the Informer interface. Adds an EventHandler to the fake Informers (ignores options). TODO(community): Implement Registration. +func (f *FakeInformer) AddEventHandlerWithOptions(handler cache.ResourceEventHandler, _ cache.HandlerOptions) (cache.ResourceEventHandlerRegistration, error) { + f.handlers = append(f.handlers, handler) + return nil, nil +} + +// Run implements the Informer interface. Increments f.RunCount. func (f *FakeInformer) Run(<-chan struct{}) { f.RunCount++ } -// Add fakes an Add event for obj +func (f *FakeInformer) RunWithContext(_ context.Context) { + f.RunCount++ +} + +// Add fakes an Add event for obj. func (f *FakeInformer) Add(obj metav1.Object) { for _, h := range f.handlers { - h.OnAdd(obj) + h.OnAdd(obj, false) } } -// Update fakes an Update event for obj +// Update fakes an Update event for obj. func (f *FakeInformer) Update(oldObj, newObj metav1.Object) { for _, h := range f.handlers { h.OnUpdate(oldObj, newObj) } } -// Delete fakes an Delete event for obj +// Delete fakes a Delete event for obj. func (f *FakeInformer) Delete(obj metav1.Object) { for _, h := range f.handlers { h.OnDelete(obj) } } -// AddEventHandlerWithResyncPeriod does nothing. TODO(community): Implement this. -func (f *FakeInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) { - +// RemoveEventHandler does nothing. TODO(community): Implement this. +func (f *FakeInformer) RemoveEventHandler(handle cache.ResourceEventHandlerRegistration) error { + return nil } // GetStore does nothing. TODO(community): Implement this. 
@@ -106,3 +124,23 @@ func (f *FakeInformer) GetController() cache.Controller { func (f *FakeInformer) LastSyncResourceVersion() string { return "" } + +// SetWatchErrorHandler does nothing. TODO(community): Implement this. +func (f *FakeInformer) SetWatchErrorHandler(cache.WatchErrorHandler) error { + return nil +} + +// SetWatchErrorHandlerWithContext does nothing. TODO(community): Implement this. +func (f *FakeInformer) SetWatchErrorHandlerWithContext(cache.WatchErrorHandlerWithContext) error { + return nil +} + +// SetTransform does nothing. TODO(community): Implement this. +func (f *FakeInformer) SetTransform(t cache.TransformFunc) error { + return nil +} + +// IsStopped does nothing. TODO(community): Implement this. +func (f *FakeInformer) IsStopped() bool { + return false +} diff --git a/pkg/controller/controllerutil/controllerutil.go b/pkg/controller/controllerutil/controllerutil.go index e9f44b65c5..0f12b934ee 100644 --- a/pkg/controller/controllerutil/controllerutil.go +++ b/pkg/controller/controllerutil/controllerutil.go @@ -20,18 +20,23 @@ import ( "context" "fmt" "reflect" + "slices" - "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) // AlreadyOwnedError is an error returned if the object you are trying to assign // a controller reference is already owned by another controller Object is the -// subject and Owner is the reference for the current owner +// subject and Owner is the reference for the current owner. 
type AlreadyOwnedError struct { Object metav1.Object Owner metav1.OwnerReference @@ -41,53 +46,226 @@ func (e *AlreadyOwnedError) Error() string { return fmt.Sprintf("Object %s/%s is already owned by another %s controller %s", e.Object.GetNamespace(), e.Object.GetName(), e.Owner.Kind, e.Owner.Name) } -func newAlreadyOwnedError(Object metav1.Object, Owner metav1.OwnerReference) *AlreadyOwnedError { +func newAlreadyOwnedError(obj metav1.Object, owner metav1.OwnerReference) *AlreadyOwnedError { return &AlreadyOwnedError{ - Object: Object, - Owner: Owner, + Object: obj, + Owner: owner, + } +} + +// OwnerReferenceOption is a function that can modify a `metav1.OwnerReference`. +type OwnerReferenceOption func(*metav1.OwnerReference) + +// WithBlockOwnerDeletion allows configuring the BlockOwnerDeletion field on the `metav1.OwnerReference`. +func WithBlockOwnerDeletion(blockOwnerDeletion bool) OwnerReferenceOption { + return func(ref *metav1.OwnerReference) { + ref.BlockOwnerDeletion = &blockOwnerDeletion } } -// SetControllerReference sets owner as a Controller OwnerReference on owned. -// This is used for garbage collection of the owned object and for -// reconciling the owner object on changes to owned (with a Watch + EnqueueRequestForOwner). +// SetControllerReference sets owner as a Controller OwnerReference on controlled. +// This is used for garbage collection of the controlled object and for +// reconciling the owner object on changes to controlled (with a Watch + EnqueueRequestForOwner). // Since only one OwnerReference can be a controller, it returns an error if // there is another OwnerReference with Controller flag set. -func SetControllerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { +func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Scheme, opts ...OwnerReferenceOption) error { + // Validate the owner. 
ro, ok := owner.(runtime.Object) if !ok { - return fmt.Errorf("is not a %T a runtime.Object, cannot call SetControllerReference", owner) + return fmt.Errorf("%T is not a runtime.Object, cannot call SetControllerReference", owner) } + if err := validateOwner(owner, controlled); err != nil { + return err + } + + // Create a new controller ref. + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + ref := metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: owner.GetName(), + UID: owner.GetUID(), + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + } + for _, opt := range opts { + opt(&ref) + } + + // Return early with an error if the object is already controlled. + if existing := metav1.GetControllerOf(controlled); existing != nil && !referSameObject(*existing, ref) { + return newAlreadyOwnedError(controlled, *existing) + } + + // Update owner references and return. + upsertOwnerRef(ref, controlled) + return nil +} +// SetOwnerReference is a helper method to make sure the given object contains an object reference to the object provided. +// This allows you to declare that owner has a dependency on the object without specifying it as a controller. +// If a reference to the same object already exists, it'll be overwritten with the newly provided version. +func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme, opts ...OwnerReferenceOption) error { + // Validate the owner. + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call SetOwnerReference", owner) + } + if err := validateOwner(owner, object); err != nil { + return err + } + + // Create a new owner ref. 
+ gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + ref := metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + UID: owner.GetUID(), + Name: owner.GetName(), + } + for _, opt := range opts { + opt(&ref) + } + + // Update owner references and return. + upsertOwnerRef(ref, object) + return nil +} + +// RemoveOwnerReference is a helper method to make sure the given object removes an owner reference to the object provided. +// This allows you to remove the owner to establish a new owner of the object in a subsequent call. +func RemoveOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { + owners := object.GetOwnerReferences() + length := len(owners) + if length < 1 { + return fmt.Errorf("%T does not have any owner references", object) + } + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call RemoveOwnerReference", owner) + } gvk, err := apiutil.GVKForObject(ro, scheme) if err != nil { return err } - // Create a new ref - ref := *metav1.NewControllerRef(owner, schema.GroupVersionKind{Group: gvk.Group, Version: gvk.Version, Kind: gvk.Kind}) + index := indexOwnerRef(owners, metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Name: owner.GetName(), + Kind: gvk.Kind, + }) + if index == -1 { + return fmt.Errorf("%T does not have an owner reference for %T", object, owner) + } + + owners = append(owners[:index], owners[index+1:]...) 
+ object.SetOwnerReferences(owners) + return nil +} - existingRefs := object.GetOwnerReferences() - fi := -1 - for i, r := range existingRefs { - if referSameObject(ref, r) { - fi = i - } else if r.Controller != nil && *r.Controller { - return newAlreadyOwnedError(object, r) +// HasControllerReference returns true if the object +// has an owner ref with controller equal to true +func HasControllerReference(object metav1.Object) bool { + owners := object.GetOwnerReferences() + for _, owner := range owners { + isTrue := owner.Controller + if owner.Controller != nil && *isTrue { + return true } } - if fi == -1 { - existingRefs = append(existingRefs, ref) + return false +} + +// HasOwnerReference returns true if the owners list contains an owner reference +// that matches the object's group, kind, and name. +func HasOwnerReference(ownerRefs []metav1.OwnerReference, obj client.Object, scheme *runtime.Scheme) (bool, error) { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return false, err + } + idx := indexOwnerRef(ownerRefs, metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Name: obj.GetName(), + Kind: gvk.Kind, + }) + return idx != -1, nil +} + +// RemoveControllerReference removes an owner reference where the controller +// equals true +func RemoveControllerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { + if ok := HasControllerReference(object); !ok { + return fmt.Errorf("%T does not have a owner reference with controller equals true", object) + } + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call RemoveControllerReference", owner) + } + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + ownerRefs := object.GetOwnerReferences() + index := indexOwnerRef(ownerRefs, metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Name: owner.GetName(), + Kind: gvk.Kind, + }) + + if index == -1 { + return fmt.Errorf("%T 
does not have an controller reference for %T", object, owner) + } + + if ownerRefs[index].Controller == nil || !*ownerRefs[index].Controller { + return fmt.Errorf("%T owner is not the controller reference for %T", owner, object) + } + + ownerRefs = append(ownerRefs[:index], ownerRefs[index+1:]...) + object.SetOwnerReferences(ownerRefs) + return nil +} + +func upsertOwnerRef(ref metav1.OwnerReference, object metav1.Object) { + owners := object.GetOwnerReferences() + if idx := indexOwnerRef(owners, ref); idx == -1 { + owners = append(owners, ref) } else { - existingRefs[fi] = ref + owners[idx] = ref } + object.SetOwnerReferences(owners) +} + +// indexOwnerRef returns the index of the owner reference in the slice if found, or -1. +func indexOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) int { + for index, r := range ownerReferences { + if referSameObject(r, ref) { + return index + } + } + return -1 +} - // Update owner references - object.SetOwnerReferences(existingRefs) +func validateOwner(owner, object metav1.Object) error { + ownerNs := owner.GetNamespace() + if ownerNs != "" { + objNs := object.GetNamespace() + if objNs == "" { + return fmt.Errorf("cluster-scoped resource must not have a namespace-scoped owner, owner's namespace %s", ownerNs) + } + if ownerNs != objNs { + return fmt.Errorf("cross-namespace owner references are disallowed, owner's namespace %s, obj's namespace %s", owner.GetNamespace(), object.GetNamespace()) + } + } return nil } -// Returns true if a and b point to the same object +// Returns true if a and b point to the same object. 
func referSameObject(a, b metav1.OwnerReference) bool { aGV, err := schema.ParseGroupVersion(a.APIVersion) if err != nil { @@ -98,42 +276,60 @@ func referSameObject(a, b metav1.OwnerReference) bool { if err != nil { return false } - - return aGV == bGV && a.Kind == b.Kind && a.Name == b.Name + return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name } -// OperationResult is the action result of a CreateOrUpdate call +// OperationResult is the action result of a CreateOrUpdate or CreateOrPatch call. type OperationResult string const ( // They should complete the sentence "Deployment default/foo has been ..." - // OperationResultNone means that the resource has not been changed + // OperationResultNone means that the resource has not been changed. OperationResultNone OperationResult = "unchanged" - // OperationResultCreated means that a new resource is created + // OperationResultCreated means that a new resource is created. OperationResultCreated OperationResult = "created" - // OperationResultUpdated means that an existing resource is updated + // OperationResultUpdated means that an existing resource is updated. OperationResultUpdated OperationResult = "updated" + // OperationResultUpdatedStatus means that an existing resource and its status is updated. + OperationResultUpdatedStatus OperationResult = "updatedStatus" + // OperationResultUpdatedStatusOnly means that only an existing status is updated. + OperationResultUpdatedStatusOnly OperationResult = "updatedStatusOnly" ) -// CreateOrUpdate creates or updates the given object in the Kubernetes -// cluster. The object's desired state must be reconciled with the existing -// state inside the passed in callback MutateFn. +// CreateOrUpdate attempts to fetch the given object from the Kubernetes cluster. +// If the object didn't exist, MutateFn will be called, and it will be created. +// If the object did exist, MutateFn will be called, and if it changed the +// object, it will be updated. 
+// Otherwise, it will be left unchanged. +// The executed operation (and an error) will be returned. // -// The MutateFn is called regardless of creating or updating an object. +// WARNING: If the MutateFn resets a value on obj that has a default value, +// CreateOrUpdate will *always* perform an update. This is because when the +// object is fetched from the API server, the value will have taken on the +// default value, and the check for equality will fail. For example, Deployments +// must have a Replicas value set. If the MutateFn sets a Deployment's Replicas +// to nil, then it will never match with the object returned from the API +// server, which defaults the value to 1. // -// It returns the executed operation and an error. -func CreateOrUpdate(ctx context.Context, c client.Client, obj runtime.Object, f MutateFn) (OperationResult, error) { - key, err := client.ObjectKeyFromObject(obj) - if err != nil { - return OperationResultNone, err - } - +// WARNING: CreateOrUpdate assumes that no values have been set on obj aside +// from the Name/Namespace. Values other than Name and Namespace that existed on +// obj may be overwritten by the corresponding values in the object returned +// from the Kubernetes API server. When this happens, the Update will not work +// as expected. +// +// Note: changes made by MutateFn to any sub-resource (status...), will be +// discarded. 
+func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { + key := client.ObjectKeyFromObject(obj) if err := c.Get(ctx, key, obj); err != nil { - if !errors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { return OperationResultNone, err } - if err := mutate(f, key, obj); err != nil { - return OperationResultNone, err + if f != nil { + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } } + if err := c.Create(ctx, obj); err != nil { return OperationResultNone, err } @@ -141,11 +337,13 @@ func CreateOrUpdate(ctx context.Context, c client.Client, obj runtime.Object, f } existing := obj.DeepCopyObject() - if err := mutate(f, key, obj); err != nil { - return OperationResultNone, err + if f != nil { + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } } - if reflect.DeepEqual(existing, obj) { + if equality.Semantic.DeepEqual(existing, obj) { return OperationResultNone, nil } @@ -155,16 +353,182 @@ func CreateOrUpdate(ctx context.Context, c client.Client, obj runtime.Object, f return OperationResultUpdated, nil } -// mutate wraps a MutateFn and applies validation to its result -func mutate(f MutateFn, key client.ObjectKey, obj runtime.Object) error { +// CreateOrPatch attempts to fetch the given object from the Kubernetes cluster. +// If the object didn't exist, MutateFn will be called, and it will be created. +// If the object did exist, MutateFn will be called, and if it changed the +// object, it will be patched. +// Otherwise, it will be left unchanged. +// The executed operation (and an error) will be returned. +// +// WARNING: If the MutateFn resets a value on obj that has a default value, +// CreateOrPatch will *always* perform a patch. This is because when the +// object is fetched from the API server, the value will have taken on the +// default value, and the check for equality will fail. 
+// For example, Deployments must have a Replicas value set. If the MutateFn sets +// a Deployment's Replicas to nil, then it will never match with the object +// returned from the API server, which defaults the value to 1. +// +// WARNING: CreateOrPatch assumes that no values have been set on obj aside +// from the Name/Namespace. Values other than Name and Namespace that existed on +// obj may be overwritten by the corresponding values in the object returned +// from the Kubernetes API server. When this happens, the Patch will not work +// as expected. +// +// Note: changes to any sub-resource other than status will be ignored. +// Changes to the status sub-resource will only be applied if the object +// already exist. To change the status on object creation, the easiest +// way is to requeue the object in the controller if OperationResult is +// OperationResultCreated +func CreateOrPatch(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if !apierrors.IsNotFound(err) { + return OperationResultNone, err + } + if f != nil { + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + } + if err := c.Create(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultCreated, nil + } + + // Create patches for the object and its possible status. + objPatch := client.MergeFrom(obj.DeepCopyObject().(client.Object)) + statusPatch := client.MergeFrom(obj.DeepCopyObject().(client.Object)) + + // Create a copy of the original object as well as converting that copy to + // unstructured data. 
+ before, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj.DeepCopyObject()) + if err != nil { + return OperationResultNone, err + } + + // Attempt to extract the status from the resource for easier comparison later + beforeStatus, hasBeforeStatus, err := unstructured.NestedFieldCopy(before, "status") + if err != nil { + return OperationResultNone, err + } + + // If the resource contains a status then remove it from the unstructured + // copy to avoid unnecessary patching later. + if hasBeforeStatus { + unstructured.RemoveNestedField(before, "status") + } + + // Mutate the original object. + if f != nil { + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + } + + // Convert the resource to unstructured to compare against our before copy. + after, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return OperationResultNone, err + } + + // Attempt to extract the status from the resource for easier comparison later + afterStatus, hasAfterStatus, err := unstructured.NestedFieldCopy(after, "status") + if err != nil { + return OperationResultNone, err + } + + // If the resource contains a status then remove it from the unstructured + // copy to avoid unnecessary patching later. 
+ if hasAfterStatus { + unstructured.RemoveNestedField(after, "status") + } + + result := OperationResultNone + + if !reflect.DeepEqual(before, after) { + // Only issue a Patch if the before and after resources (minus status) differ + if err := c.Patch(ctx, obj, objPatch); err != nil { + return result, err + } + result = OperationResultUpdated + } + + if (hasBeforeStatus || hasAfterStatus) && !reflect.DeepEqual(beforeStatus, afterStatus) { + // Only issue a Status Patch if the resource has a status and the beforeStatus + // and afterStatus copies differ + if result == OperationResultUpdated { + // If Status was replaced by Patch before, set it to afterStatus + objectAfterPatch, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return result, err + } + if err = unstructured.SetNestedField(objectAfterPatch, afterStatus, "status"); err != nil { + return result, err + } + // If Status was replaced by Patch before, restore patched structure to the obj + if err = runtime.DefaultUnstructuredConverter.FromUnstructured(objectAfterPatch, obj); err != nil { + return result, err + } + } + if err := c.Status().Patch(ctx, obj, statusPatch); err != nil { + return result, err + } + if result == OperationResultUpdated { + result = OperationResultUpdatedStatus + } else { + result = OperationResultUpdatedStatusOnly + } + } + + return result, nil +} + +// mutate wraps a MutateFn and applies validation to its result. +func mutate(f MutateFn, key client.ObjectKey, obj client.Object) error { if err := f(); err != nil { return err } - if newKey, err := client.ObjectKeyFromObject(obj); err != nil || key != newKey { + if newKey := client.ObjectKeyFromObject(obj); key != newKey { return fmt.Errorf("MutateFn cannot mutate object name and/or object namespace") } return nil } -// MutateFn is a function which mutates the existing object into it's desired state. +// MutateFn is a function which mutates the existing object into its desired state. 
type MutateFn func() error + +// AddFinalizer accepts an Object and adds the provided finalizer if not present. +// It returns an indication of whether it updated the object's list of finalizers. +func AddFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) { + f := o.GetFinalizers() + if slices.Contains(f, finalizer) { + return false + } + o.SetFinalizers(append(f, finalizer)) + return true +} + +// RemoveFinalizer accepts an Object and removes the provided finalizer if present. +// It returns an indication of whether it updated the object's list of finalizers. +func RemoveFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) { + f := o.GetFinalizers() + length := len(f) + + index := 0 + for i := range length { + if f[i] == finalizer { + continue + } + f[index] = f[i] + index++ + } + o.SetFinalizers(f[:index]) + return length != index +} + +// ContainsFinalizer checks an Object that the provided finalizer is present. +func ContainsFinalizer(o client.Object, finalizer string) bool { + f := o.GetFinalizers() + return slices.Contains(f, finalizer) +} diff --git a/pkg/controller/controllerutil/controllerutil_suite_test.go b/pkg/controller/controllerutil/controllerutil_suite_test.go index 248316eaa7..a4ac5cc746 100644 --- a/pkg/controller/controllerutil/controllerutil_suite_test.go +++ b/pkg/controller/controllerutil/controllerutil_suite_test.go @@ -19,7 +19,7 @@ package controllerutil_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "k8s.io/client-go/rest" @@ -32,16 +32,16 @@ func TestControllerutil(t *testing.T) { RunSpecs(t, "Controllerutil Suite") } -var t *envtest.Environment +var testenv *envtest.Environment var cfg *rest.Config var c client.Client var _ = BeforeSuite(func() { var err error - t = &envtest.Environment{} + testenv = &envtest.Environment{} - cfg, err = t.Start() + cfg, err = testenv.Start() Expect(err).NotTo(HaveOccurred()) c, err = client.New(cfg, client.Options{}) @@ -49,5 +49,5 @@ var _ = BeforeSuite(func() { }) var _ = AfterSuite(func() { - Expect(t.Stop()).To(Succeed()) + Expect(testenv.Stop()).To(Succeed()) }) diff --git a/pkg/controller/controllerutil/controllerutil_test.go b/pkg/controller/controllerutil/controllerutil_test.go index d6346894a4..a716667f6a 100644 --- a/pkg/controller/controllerutil/controllerutil_test.go +++ b/pkg/controller/controllerutil/controllerutil_test.go @@ -21,9 +21,7 @@ import ( "fmt" "math/rand" - "sigs.k8s.io/controller-runtime/pkg/client" - - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -32,10 +30,252 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) var _ = Describe("Controllerutil", func() { + Describe("SetOwnerReference", func() { + It("should set ownerRef on an empty list", func() { + rs := &appsv1.ReplicaSet{} + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid"}, + } + Expect(controllerutil.SetOwnerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + })) + }) + + It("should set the BlockOwnerDeletion if it is specified as an option", func() { + t := true + rs := &appsv1.ReplicaSet{} + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid"}, + } + + Expect(controllerutil.SetOwnerReference(dep, rs, scheme.Scheme, controllerutil.WithBlockOwnerDeletion(true))).ToNot(HaveOccurred()) + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + BlockOwnerDeletion: &t, + })) + }) + + It("should not duplicate owner references", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + }, + }, + }, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid"}, + } + + Expect(controllerutil.SetOwnerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: 
"foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + })) + }) + + It("should update the reference", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1alpha1", + UID: "foo-uid-1", + }, + }, + }, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid-2"}, + } + + Expect(controllerutil.SetOwnerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid-2", + })) + }) + It("should remove the owner reference", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1alpha1", + UID: "foo-uid-1", + }, + }, + }, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid-2"}, + } + + Expect(controllerutil.SetOwnerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid-2", + })) + Expect(controllerutil.RemoveOwnerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(rs.GetOwnerReferences()).To(BeEmpty()) + }) + It("should remove the owner reference established by the SetControllerReference function", func() { + rs := &appsv1.ReplicaSet{} + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid"}, + } + + Expect(controllerutil.SetControllerReference(dep, rs, scheme.Scheme)).NotTo(HaveOccurred()) + t := true + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + 
UID: "foo-uid", + Controller: &t, + BlockOwnerDeletion: &t, + })) + Expect(controllerutil.RemoveOwnerReference(dep, rs, scheme.Scheme)).NotTo(HaveOccurred()) + Expect(rs.GetOwnerReferences()).To(BeEmpty()) + }) + It("should error when trying to remove the reference that doesn't exist", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{}, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid-2"}, + } + Expect(controllerutil.RemoveOwnerReference(dep, rs, scheme.Scheme)).To(HaveOccurred()) + Expect(rs.GetOwnerReferences()).To(BeEmpty()) + }) + It("should error when trying to remove the reference that doesn't abide by the scheme", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{}, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid-2"}, + } + Expect(controllerutil.SetOwnerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(controllerutil.RemoveOwnerReference(dep, rs, runtime.NewScheme())).To(HaveOccurred()) + Expect(rs.GetOwnerReferences()).To(HaveLen(1)) + }) + It("should error when trying to remove the owner when setting the owner as a non runtime.Object", func() { + var obj metav1.Object + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{}, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid-2"}, + } + Expect(controllerutil.SetOwnerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(controllerutil.RemoveOwnerReference(obj, rs, scheme.Scheme)).To(HaveOccurred()) + Expect(rs.GetOwnerReferences()).To(HaveLen(1)) + }) + + It("should error when trying to remove an owner that doesn't exist", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{}, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid-2"}, + } + dep2 := &extensionsv1beta1.Deployment{ + ObjectMeta: 
metav1.ObjectMeta{Name: "bar", UID: "bar-uid-3"}, + } + Expect(controllerutil.SetOwnerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(controllerutil.RemoveOwnerReference(dep2, rs, scheme.Scheme)).To(HaveOccurred()) + Expect(rs.GetOwnerReferences()).To(HaveLen(1)) + }) + + It("should return true when HasControllerReference evaluates owner reference set by SetControllerReference", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{}, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid-2"}, + } + Expect(controllerutil.SetControllerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(controllerutil.HasControllerReference(rs)).To(BeTrue()) + }) + + It("should return false when HasControllerReference evaluates owner reference set by SetOwnerReference", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{}, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid-2"}, + } + Expect(controllerutil.SetOwnerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(controllerutil.HasControllerReference(rs)).To(BeFalse()) + }) + + It("should error when RemoveControllerReference owner's controller is set to false", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{}, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid-2"}, + } + Expect(controllerutil.SetOwnerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(controllerutil.RemoveControllerReference(dep, rs, scheme.Scheme)).To(HaveOccurred()) + }) + + It("should error when RemoveControllerReference passed in owner is not the owner", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{}, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid-2"}, + } + dep2 := &extensionsv1beta1.Deployment{ + ObjectMeta: 
metav1.ObjectMeta{Name: "foo-2", UID: "foo-uid-42"}, + } + Expect(controllerutil.SetControllerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(controllerutil.SetOwnerReference(dep2, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(controllerutil.RemoveControllerReference(dep2, rs, scheme.Scheme)).To(HaveOccurred()) + Expect(rs.GetOwnerReferences()).To(HaveLen(2)) + }) + + It("should not error when RemoveControllerReference owner's controller is set to true", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{}, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid-2"}, + } + Expect(controllerutil.SetControllerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(controllerutil.RemoveControllerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + Expect(rs.GetOwnerReferences()).To(BeEmpty()) + }) + }) + Describe("SetControllerReference", func() { It("should set the OwnerReference if it can find the group version kind", func() { rs := &appsv1.ReplicaSet{} @@ -115,6 +355,97 @@ var _ = Describe("Controllerutil", func() { BlockOwnerDeletion: &t, })) }) + + It("should replace the owner reference if it's already present", func() { + t := true + rsOwners := []metav1.OwnerReference{ + { + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1alpha1", + UID: "foo-uid", + Controller: &t, + BlockOwnerDeletion: &t, + }, + } + rs := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", OwnerReferences: rsOwners}} + dep := &extensionsv1beta1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", UID: "foo-uid"}} + + Expect(controllerutil.SetControllerReference(dep, rs, scheme.Scheme)).NotTo(HaveOccurred()) + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + Controller: &t, + BlockOwnerDeletion: &t, + })) + }) + + 
It("should return an error if it's setting a cross-namespace owner reference", func() { + rs := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "namespace1"}} + dep := &extensionsv1beta1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "namespace2", UID: "foo-uid"}} + + err := controllerutil.SetControllerReference(dep, rs, scheme.Scheme) + + Expect(err).To(HaveOccurred()) + }) + + It("should return an error if it's owner is namespaced resource but dependant is cluster-scoped resource", func() { + pv := &corev1.PersistentVolume{ObjectMeta: metav1.ObjectMeta{Name: "foo"}} + pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", UID: "foo-uid"}} + + err := controllerutil.SetControllerReference(pod, pv, scheme.Scheme) + + Expect(err).To(HaveOccurred()) + }) + + It("should not return any error if the existing owner has a different version", func() { + f := false + t := true + rsOwners := []metav1.OwnerReference{ + { + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1alpha1", + UID: "foo-uid", + Controller: &f, + BlockOwnerDeletion: &t, + }, + } + rs := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", OwnerReferences: rsOwners}} + dep := &extensionsv1beta1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", UID: "foo-uid"}} + + Expect(controllerutil.SetControllerReference(dep, rs, scheme.Scheme)).NotTo(HaveOccurred()) + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + // APIVersion is the new owner's one + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + Controller: &t, + BlockOwnerDeletion: &t, + })) + }) + + It("should set the BlockOwnerDeletion if it is specified as an option", func() { + f := false + t := true + rs := &appsv1.ReplicaSet{} + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid"}, + } + + 
Expect(controllerutil.SetControllerReference(dep, rs, scheme.Scheme, controllerutil.WithBlockOwnerDeletion(false))).NotTo(HaveOccurred()) + Expect(rs.OwnerReferences).To(ConsistOf(metav1.OwnerReference{ + Name: "foo", + Kind: "Deployment", + APIVersion: "extensions/v1beta1", + UID: "foo-uid", + Controller: &t, + BlockOwnerDeletion: &f, + })) + }) }) Describe("CreateOrUpdate", func() { @@ -160,8 +491,180 @@ var _ = Describe("Controllerutil", func() { specr = deploymentSpecr(deploy, deplSpec) }) - It("creates a new object if one doesn't exists", func() { - op, err := controllerutil.CreateOrUpdate(context.TODO(), c, deploy, specr) + It("creates a new object if one doesn't exists", func(ctx SpecContext) { + op, err := controllerutil.CreateOrUpdate(ctx, c, deploy, specr) + + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultCreated") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + + By("actually having the deployment created") + fetched := &appsv1.Deployment{} + Expect(c.Get(ctx, deplKey, fetched)).To(Succeed()) + + By("being mutated by MutateFn") + Expect(fetched.Spec.Template.Spec.Containers).To(HaveLen(1)) + Expect(fetched.Spec.Template.Spec.Containers[0].Name).To(Equal(deplSpec.Template.Spec.Containers[0].Name)) + Expect(fetched.Spec.Template.Spec.Containers[0].Image).To(Equal(deplSpec.Template.Spec.Containers[0].Image)) + }) + + It("updates existing object", func(ctx SpecContext) { + var scale int32 = 2 + op, err := controllerutil.CreateOrUpdate(ctx, c, deploy, specr) + Expect(err).NotTo(HaveOccurred()) + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + + op, err = controllerutil.CreateOrUpdate(ctx, c, deploy, deploymentScaler(deploy, scale)) + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultUpdated") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultUpdated)) + + By("actually having the deployment scaled") + fetched := 
&appsv1.Deployment{} + Expect(c.Get(ctx, deplKey, fetched)).To(Succeed()) + Expect(*fetched.Spec.Replicas).To(Equal(scale)) + }) + + It("updates only changed objects", func(ctx SpecContext) { + op, err := controllerutil.CreateOrUpdate(ctx, c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + op, err = controllerutil.CreateOrUpdate(ctx, c, deploy, deploymentIdentity) + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultNone") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + }) + + It("errors when MutateFn changes object name on creation", func(ctx SpecContext) { + op, err := controllerutil.CreateOrUpdate(ctx, c, deploy, func() error { + Expect(specr()).To(Succeed()) + return deploymentRenamer(deploy)() + }) + + By("returning error") + Expect(err).To(HaveOccurred()) + + By("returning OperationResultNone") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + }) + + It("errors when MutateFn renames an object", func(ctx SpecContext) { + op, err := controllerutil.CreateOrUpdate(ctx, c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + op, err = controllerutil.CreateOrUpdate(ctx, c, deploy, deploymentRenamer(deploy)) + + By("returning error") + Expect(err).To(HaveOccurred()) + + By("returning OperationResultNone") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + }) + + It("errors when object namespace changes", func(ctx SpecContext) { + op, err := controllerutil.CreateOrUpdate(ctx, c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + op, err = controllerutil.CreateOrUpdate(ctx, c, deploy, deploymentNamespaceChanger(deploy)) + + By("returning error") + Expect(err).To(HaveOccurred()) + + By("returning OperationResultNone") + 
Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + }) + + It("aborts immediately if there was an error initially retrieving the object", func(ctx SpecContext) { + op, err := controllerutil.CreateOrUpdate(ctx, errorReader{c}, deploy, func() error { + Fail("Mutation method should not run") + return nil + }) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + Expect(err).To(HaveOccurred()) + }) + }) + + Describe("CreateOrPatch", func() { + var deploy *appsv1.Deployment + var deplSpec appsv1.DeploymentSpec + var deplKey types.NamespacedName + var specr controllerutil.MutateFn + + BeforeEach(func() { + deploy = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("deploy-%d", rand.Int31()), + Namespace: "default", + }, + } + + deplSpec = appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + }, + }, + }, + }, + } + + deplKey = types.NamespacedName{ + Name: deploy.Name, + Namespace: deploy.Namespace, + } + + specr = deploymentSpecr(deploy, deplSpec) + }) + + assertLocalDeployWasUpdated := func(ctx context.Context, fetched *appsv1.Deployment) { + By("local deploy object was updated during patch & has same spec, status, resource version as fetched") + if fetched == nil { + fetched = &appsv1.Deployment{} + ExpectWithOffset(1, c.Get(ctx, deplKey, fetched)).To(Succeed()) + } + ExpectWithOffset(1, fetched.ResourceVersion).To(Equal(deploy.ResourceVersion)) + ExpectWithOffset(1, fetched.Spec).To(BeEquivalentTo(deploy.Spec)) + ExpectWithOffset(1, fetched.Status).To(BeEquivalentTo(deploy.Status)) + } + + assertLocalDeployStatusWasUpdated := func(ctx context.Context, fetched *appsv1.Deployment) { + By("local deploy object was updated during patch & 
has same spec, status, resource version as fetched") + if fetched == nil { + fetched = &appsv1.Deployment{} + ExpectWithOffset(1, c.Get(ctx, deplKey, fetched)).To(Succeed()) + } + ExpectWithOffset(1, fetched.ResourceVersion).To(Equal(deploy.ResourceVersion)) + ExpectWithOffset(1, *fetched.Spec.Replicas).To(BeEquivalentTo(int32(5))) + ExpectWithOffset(1, fetched.Status).To(BeEquivalentTo(deploy.Status)) + ExpectWithOffset(1, len(fetched.Status.Conditions)).To(BeEquivalentTo(1)) + } + + It("creates a new object if one doesn't exists", func(ctx SpecContext) { + op, err := controllerutil.CreateOrPatch(ctx, c, deploy, specr) By("returning no error") Expect(err).NotTo(HaveOccurred()) @@ -171,7 +674,7 @@ var _ = Describe("Controllerutil", func() { By("actually having the deployment created") fetched := &appsv1.Deployment{} - Expect(c.Get(context.TODO(), deplKey, fetched)).To(Succeed()) + Expect(c.Get(ctx, deplKey, fetched)).To(Succeed()) By("being mutated by MutateFn") Expect(fetched.Spec.Template.Spec.Containers).To(HaveLen(1)) @@ -179,13 +682,13 @@ var _ = Describe("Controllerutil", func() { Expect(fetched.Spec.Template.Spec.Containers[0].Image).To(Equal(deplSpec.Template.Spec.Containers[0].Image)) }) - It("updates existing object", func() { + It("patches existing object", func(ctx SpecContext) { var scale int32 = 2 - op, err := controllerutil.CreateOrUpdate(context.TODO(), c, deploy, specr) + op, err := controllerutil.CreateOrPatch(ctx, c, deploy, specr) Expect(err).NotTo(HaveOccurred()) Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) - op, err = controllerutil.CreateOrUpdate(context.TODO(), c, deploy, deploymentScaler(deploy, scale)) + op, err = controllerutil.CreateOrPatch(ctx, c, deploy, deploymentScaler(deploy, scale)) By("returning no error") Expect(err).NotTo(HaveOccurred()) @@ -194,26 +697,113 @@ var _ = Describe("Controllerutil", func() { By("actually having the deployment scaled") fetched := &appsv1.Deployment{} - 
Expect(c.Get(context.TODO(), deplKey, fetched)).To(Succeed()) + Expect(c.Get(ctx, deplKey, fetched)).To(Succeed()) Expect(*fetched.Spec.Replicas).To(Equal(scale)) + assertLocalDeployWasUpdated(ctx, fetched) }) - It("updates only changed objects", func() { - op, err := controllerutil.CreateOrUpdate(context.TODO(), c, deploy, specr) + It("patches only changed objects", func(ctx SpecContext) { + op, err := controllerutil.CreateOrPatch(ctx, c, deploy, specr) Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) Expect(err).NotTo(HaveOccurred()) - op, err = controllerutil.CreateOrUpdate(context.TODO(), c, deploy, deploymentIdentity) + op, err = controllerutil.CreateOrPatch(ctx, c, deploy, deploymentIdentity) By("returning no error") Expect(err).NotTo(HaveOccurred()) By("returning OperationResultNone") Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) + + assertLocalDeployWasUpdated(ctx, nil) + }) + + It("patches only changed status", func(ctx SpecContext) { + op, err := controllerutil.CreateOrPatch(ctx, c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + deployStatus := appsv1.DeploymentStatus{ + ReadyReplicas: 1, + Replicas: 3, + } + op, err = controllerutil.CreateOrPatch(ctx, c, deploy, deploymentStatusr(deploy, deployStatus)) + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultUpdatedStatusOnly") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultUpdatedStatusOnly)) + + assertLocalDeployWasUpdated(ctx, nil) + }) + + It("patches resource and status", func(ctx SpecContext) { + op, err := controllerutil.CreateOrPatch(ctx, c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + replicas := int32(3) + deployStatus := appsv1.DeploymentStatus{ + ReadyReplicas: 1, + Replicas: replicas, + } + op, err = controllerutil.CreateOrPatch(ctx, c, 
deploy, func() error { + Expect(deploymentScaler(deploy, replicas)()).To(Succeed()) + return deploymentStatusr(deploy, deployStatus)() + }) + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultUpdatedStatus") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultUpdatedStatus)) + + assertLocalDeployWasUpdated(ctx, nil) }) - It("errors when MutateFn changes objct name on creation", func() { - op, err := controllerutil.CreateOrUpdate(context.TODO(), c, deploy, func() error { + It("patches resource and not empty status", func(ctx SpecContext) { + op, err := controllerutil.CreateOrPatch(ctx, c, deploy, specr) + + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) + Expect(err).NotTo(HaveOccurred()) + + replicas := int32(3) + deployStatus := appsv1.DeploymentStatus{ + ReadyReplicas: 1, + Replicas: replicas, + } + op, err = controllerutil.CreateOrPatch(ctx, c, deploy, func() error { + Expect(deploymentScaler(deploy, replicas)()).To(Succeed()) + return deploymentStatusr(deploy, deployStatus)() + }) + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultUpdatedStatus") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultUpdatedStatus)) + + assertLocalDeployWasUpdated(ctx, nil) + + op, err = controllerutil.CreateOrPatch(ctx, c, deploy, func() error { + deploy.Spec.Replicas = ptr.To(int32(5)) + deploy.Status.Conditions = []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + }} + return nil + }) + By("returning no error") + Expect(err).NotTo(HaveOccurred()) + + By("returning OperationResultUpdatedStatus") + Expect(op).To(BeEquivalentTo(controllerutil.OperationResultUpdatedStatus)) + + assertLocalDeployStatusWasUpdated(ctx, nil) + }) + + It("errors when MutateFn changes object name on creation", func(ctx SpecContext) { + op, err := controllerutil.CreateOrPatch(ctx, c, deploy, func() error { 
Expect(specr()).To(Succeed()) return deploymentRenamer(deploy)() }) @@ -225,13 +815,13 @@ var _ = Describe("Controllerutil", func() { Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) }) - It("errors when MutateFn renames an object", func() { - op, err := controllerutil.CreateOrUpdate(context.TODO(), c, deploy, specr) + It("errors when MutateFn renames an object", func(ctx SpecContext) { + op, err := controllerutil.CreateOrPatch(ctx, c, deploy, specr) Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) Expect(err).NotTo(HaveOccurred()) - op, err = controllerutil.CreateOrUpdate(context.TODO(), c, deploy, deploymentRenamer(deploy)) + op, err = controllerutil.CreateOrPatch(ctx, c, deploy, deploymentRenamer(deploy)) By("returning error") Expect(err).To(HaveOccurred()) @@ -240,13 +830,13 @@ var _ = Describe("Controllerutil", func() { Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) }) - It("errors when object namespace changes", func() { - op, err := controllerutil.CreateOrUpdate(context.TODO(), c, deploy, specr) + It("errors when object namespace changes", func(ctx SpecContext) { + op, err := controllerutil.CreateOrPatch(ctx, c, deploy, specr) Expect(op).To(BeEquivalentTo(controllerutil.OperationResultCreated)) Expect(err).NotTo(HaveOccurred()) - op, err = controllerutil.CreateOrUpdate(context.TODO(), c, deploy, deploymentNamespaceChanger(deploy)) + op, err = controllerutil.CreateOrPatch(ctx, c, deploy, deploymentNamespaceChanger(deploy)) By("returning error") Expect(err).To(HaveOccurred()) @@ -255,8 +845,8 @@ var _ = Describe("Controllerutil", func() { Expect(op).To(BeEquivalentTo(controllerutil.OperationResultNone)) }) - It("aborts immediately if there was an error initially retrieving the object", func() { - op, err := controllerutil.CreateOrUpdate(context.TODO(), errorReader{c}, deploy, func() error { + It("aborts immediately if there was an error initially retrieving the object", func(ctx SpecContext) { + op, err 
:= controllerutil.CreateOrPatch(ctx, errorReader{c}, deploy, func() error { Fail("Mutation method should not run") return nil }) @@ -265,9 +855,155 @@ var _ = Describe("Controllerutil", func() { Expect(err).To(HaveOccurred()) }) }) + + Describe("Finalizers", func() { + var deploy *appsv1.Deployment + + Describe("AddFinalizer", func() { + deploy = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{}, + }, + } + + It("should add the finalizer when not present", func() { + controllerutil.AddFinalizer(deploy, testFinalizer) + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{testFinalizer})) + }) + + It("should not add the finalizer when already present", func() { + controllerutil.AddFinalizer(deploy, testFinalizer) + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{testFinalizer})) + }) + }) + + Describe("RemoveFinalizer", func() { + It("should remove finalizer if present", func() { + controllerutil.RemoveFinalizer(deploy, testFinalizer) + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{})) + }) + + It("should remove all equal finalizers if present", func() { + deploy.SetFinalizers(append(deploy.Finalizers, testFinalizer, testFinalizer)) + controllerutil.RemoveFinalizer(deploy, testFinalizer) + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{})) + }) + }) + + Describe("AddFinalizer, which returns an indication of whether it modified the object's list of finalizers,", func() { + deploy = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{}, + }, + } + + When("the object's list of finalizers has no instances of the input finalizer", func() { + It("should return true", func() { + Expect(controllerutil.AddFinalizer(deploy, testFinalizer)).To(BeTrue()) + }) + It("should add the input finalizer to the object's list of finalizers", func() { + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{testFinalizer})) + }) + }) + + When("the object's list of finalizers has an 
instance of the input finalizer", func() { + It("should return false", func() { + Expect(controllerutil.AddFinalizer(deploy, testFinalizer)).To(BeFalse()) + }) + It("should not modify the object's list of finalizers", func() { + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{testFinalizer})) + }) + }) + }) + + Describe("RemoveFinalizer, which returns an indication of whether it modified the object's list of finalizers,", func() { + When("the object's list of finalizers has no instances of the input finalizer", func() { + It("should return false", func() { + Expect(controllerutil.RemoveFinalizer(deploy, testFinalizer1)).To(BeFalse()) + }) + It("should not modify the object's list of finalizers", func() { + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{testFinalizer})) + }) + }) + + When("the object's list of finalizers has one instance of the input finalizer", func() { + It("should return true", func() { + Expect(controllerutil.RemoveFinalizer(deploy, testFinalizer)).To(BeTrue()) + }) + It("should remove the instance of the input finalizer from the object's list of finalizers", func() { + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{})) + }) + }) + + When("the object's list of finalizers has multiple instances of the input finalizer", func() { + It("should return true", func() { + deploy.SetFinalizers(append(deploy.Finalizers, testFinalizer, testFinalizer)) + Expect(controllerutil.RemoveFinalizer(deploy, testFinalizer)).To(BeTrue()) + }) + It("should remove each instance of the input finalizer from the object's list of finalizers", func() { + Expect(deploy.ObjectMeta.GetFinalizers()).To(Equal([]string{})) + }) + }) + }) + + Describe("ContainsFinalizer", func() { + It("should check that finalizer is present", func() { + controllerutil.AddFinalizer(deploy, testFinalizer) + Expect(controllerutil.ContainsFinalizer(deploy, testFinalizer)).To(BeTrue()) + }) + + It("should check that finalizer is not present after RemoveFinalizer 
call", func() { + controllerutil.RemoveFinalizer(deploy, testFinalizer) + Expect(controllerutil.ContainsFinalizer(deploy, testFinalizer)).To(BeFalse()) + }) + }) + + Describe("HasOwnerReference", func() { + It("should return true if the object has the owner reference", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid"}, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid"}, + } + Expect(controllerutil.SetOwnerReference(dep, rs, scheme.Scheme)).ToNot(HaveOccurred()) + b, err := controllerutil.HasOwnerReference(rs.GetOwnerReferences(), dep, scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + Expect(b).To(BeTrue()) + }) + + It("should return false if the object does not have the owner reference", func() { + rs := &appsv1.ReplicaSet{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid"}, + } + dep := &extensionsv1beta1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "foo-uid"}, + } + b, err := controllerutil.HasOwnerReference(rs.GetOwnerReferences(), dep, scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + Expect(b).To(BeFalse()) + }) + }) + }) }) -var _ metav1.Object = &errMetaObj{} +const ( + testFinalizer = "foo.bar.baz" + testFinalizer1 = testFinalizer + "1" +) + +var ( + _ runtime.Object = &errRuntimeObj{} + _ metav1.Object = &errMetaObj{} +) + +type errRuntimeObj struct { + runtime.TypeMeta +} + +func (o *errRuntimeObj) DeepCopyObject() runtime.Object { + return &errRuntimeObj{} +} type errMetaObj struct { metav1.ObjectMeta @@ -280,6 +1016,13 @@ func deploymentSpecr(deploy *appsv1.Deployment, spec appsv1.DeploymentSpec) cont } } +func deploymentStatusr(deploy *appsv1.Deployment, status appsv1.DeploymentStatus) controllerutil.MutateFn { + return func() error { + deploy.Status = status + return nil + } +} + var deploymentIdentity controllerutil.MutateFn = func() error { return nil } @@ -295,7 +1038,6 @@ func deploymentNamespaceChanger(deploy 
*appsv1.Deployment) controllerutil.Mutate return func() error { deploy.Namespace = fmt.Sprintf("%s-1", deploy.Namespace) return nil - } } @@ -311,6 +1053,6 @@ type errorReader struct { client.Client } -func (e errorReader) Get(ctx context.Context, key client.ObjectKey, into runtime.Object) error { +func (e errorReader) Get(ctx context.Context, key client.ObjectKey, into client.Object, opts ...client.GetOption) error { return fmt.Errorf("unexpected error") } diff --git a/pkg/controller/controllerutil/example_test.go b/pkg/controller/controllerutil/example_test.go index 7670dd1852..b2d6f71a5c 100644 --- a/pkg/controller/controllerutil/example_test.go +++ b/pkg/controller/controllerutil/example_test.go @@ -31,7 +31,7 @@ var ( log = logf.Log.WithName("controllerutil-examples") ) -// This example creates or updates an existing deployment +// This example creates or updates an existing deployment. func ExampleCreateOrUpdate() { // c is client.Client @@ -39,7 +39,6 @@ func ExampleCreateOrUpdate() { deploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} op, err := controllerutil.CreateOrUpdate(context.TODO(), c, deploy, func() error { - // Deployment selector is immutable so we set this value only if // a new object is going to be created if deploy.ObjectMeta.CreationTimestamp.IsZero() { diff --git a/pkg/controller/doc.go b/pkg/controller/doc.go index 667b14fdd7..228335e929 100644 --- a/pkg/controller/doc.go +++ b/pkg/controller/doc.go @@ -17,7 +17,7 @@ limitations under the License. /* Package controller provides types and functions for building Controllers. Controllers implement Kubernetes APIs. -Creation +# Creation To create a new Controller, first create a manager.Manager and pass it to the controller.New function. The Controller MUST be started by calling Manager.Start. 
diff --git a/pkg/controller/example_test.go b/pkg/controller/example_test.go index 5b6e3c31dd..e3c4b6a092 100644 --- a/pkg/controller/example_test.go +++ b/pkg/controller/example_test.go @@ -17,6 +17,7 @@ limitations under the License. package controller_test import ( + "context" "os" corev1 "k8s.io/api/core/v1" @@ -41,7 +42,7 @@ var ( // manager.Manager will be used to Start the Controller, and will provide it a shared Cache and Client. func ExampleNew() { _, err := controller.New("pod-controller", mgr, controller.Options{ - Reconciler: reconcile.Func(func(o reconcile.Request) (reconcile.Result, error) { + Reconciler: reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { // Your business logic to implement the API by creating, updating, deleting objects goes here. return reconcile.Result{}, nil }), @@ -59,7 +60,7 @@ func ExampleController() { // Create a new Controller that will call the provided Reconciler function in response // to events. c, err := controller.New("pod-controller", mgr, controller.Options{ - Reconciler: reconcile.Func(func(o reconcile.Request) (reconcile.Result, error) { + Reconciler: reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { // Your business logic to implement the API by creating, updating, deleting objects goes here. return reconcile.Result{}, nil }), @@ -70,7 +71,7 @@ func ExampleController() { } // Watch for Pod create / update / delete events and call Reconcile - err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForObject{}) + err = c.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, &handler.TypedEnqueueRequestForObject[*corev1.Pod]{})) if err != nil { log.Error(err, "unable to watch pods") os.Exit(1) @@ -90,7 +91,7 @@ func ExampleController_unstructured() { // Create a new Controller that will call the provided Reconciler function in response // to events. 
c, err := controller.New("pod-controller", mgr, controller.Options{ - Reconciler: reconcile.Func(func(o reconcile.Request) (reconcile.Result, error) { + Reconciler: reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { // Your business logic to implement the API by creating, updating, deleting objects goes here. return reconcile.Result{}, nil }), @@ -107,7 +108,7 @@ func ExampleController_unstructured() { Version: "v1", }) // Watch for Pod create / update / delete events and call Reconcile - err = c.Watch(&source.Kind{Type: u}, &handler.EnqueueRequestForObject{}) + err = c.Watch(source.Kind(mgr.GetCache(), u, &handler.TypedEnqueueRequestForObject[*unstructured.Unstructured]{})) if err != nil { log.Error(err, "unable to watch pods") os.Exit(1) @@ -119,3 +120,46 @@ func ExampleController_unstructured() { os.Exit(1) } } + +// This example creates a new controller named "pod-controller" to watch Pods +// and call a no-op reconciler. The controller is not added to the provided +// manager, and must thus be started and stopped by the caller. +func ExampleNewUnmanaged() { + // mgr is a manager.Manager + + // Configure creates a new controller but does not add it to the supplied + // manager. + c, err := controller.NewUnmanaged("pod-controller", controller.Options{ + Reconciler: reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + return reconcile.Result{}, nil + }), + }) + if err != nil { + log.Error(err, "unable to create pod-controller") + os.Exit(1) + } + + if err := c.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, &handler.TypedEnqueueRequestForObject[*corev1.Pod]{})); err != nil { + log.Error(err, "unable to watch pods") + os.Exit(1) + } + + ctx, cancel := context.WithCancel(context.Background()) + + // Start our controller in a goroutine so that we do not block. + go func() { + // Block until our controller manager is elected leader. 
We presume our + // entire process will terminate if we lose leadership, so we don't need + // to handle that. + <-mgr.Elected() + + // Start our controller. This will block until the context is + // closed, or the controller returns an error. + if err := c.Start(ctx); err != nil { + log.Error(err, "cannot run experiment controller") + } + }() + + // Stop our controller. + cancel() +} diff --git a/pkg/controller/name.go b/pkg/controller/name.go new file mode 100644 index 0000000000..00ca655128 --- /dev/null +++ b/pkg/controller/name.go @@ -0,0 +1,43 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/util/sets" +) + +var nameLock sync.Mutex +var usedNames sets.Set[string] + +func checkName(name string) error { + nameLock.Lock() + defer nameLock.Unlock() + if usedNames == nil { + usedNames = sets.Set[string]{} + } + + if usedNames.Has(name) { + return fmt.Errorf("controller with name %s already exists. Controller names must be unique to avoid multiple controllers reporting the same metric. 
This validation can be disabled via the SkipNameValidation option", name) + } + + usedNames.Insert(name) + + return nil +} diff --git a/pkg/controller/priorityqueue/metrics.go b/pkg/controller/priorityqueue/metrics.go new file mode 100644 index 0000000000..967a252dfb --- /dev/null +++ b/pkg/controller/priorityqueue/metrics.go @@ -0,0 +1,172 @@ +package priorityqueue + +import ( + "sync" + "time" + + "k8s.io/client-go/util/workqueue" + "k8s.io/utils/clock" + "sigs.k8s.io/controller-runtime/pkg/internal/metrics" +) + +// This file is mostly a copy of unexported code from +// https://github.com/kubernetes/kubernetes/blob/1d8828ce707ed9dd7a6a9756385419cce1d202ac/staging/src/k8s.io/client-go/util/workqueue/metrics.go +// +// The only two differences are the addition of mapLock in defaultQueueMetrics and converging retryMetrics into queueMetrics. + +type queueMetrics[T comparable] interface { + add(item T, priority int) + get(item T, priority int) + updateDepthWithPriorityMetric(oldPriority, newPriority int) + done(item T) + updateUnfinishedWork() + retry() +} + +func newQueueMetrics[T comparable](mp workqueue.MetricsProvider, name string, clock clock.Clock) queueMetrics[T] { + if len(name) == 0 { + return noMetrics[T]{} + } + + dqm := &defaultQueueMetrics[T]{ + clock: clock, + adds: mp.NewAddsMetric(name), + latency: mp.NewLatencyMetric(name), + workDuration: mp.NewWorkDurationMetric(name), + unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name), + longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name), + addTimes: map[T]time.Time{}, + processingStartTimes: map[T]time.Time{}, + retries: mp.NewRetriesMetric(name), + } + + if mpp, ok := mp.(metrics.MetricsProviderWithPriority); ok { + dqm.depthWithPriority = mpp.NewDepthMetricWithPriority(name) + } else { + dqm.depth = mp.NewDepthMetric(name) + } + return dqm +} + +// defaultQueueMetrics expects the caller to lock before setting any metrics. 
+type defaultQueueMetrics[T comparable] struct { + clock clock.Clock + + // current depth of a workqueue + depth workqueue.GaugeMetric + depthWithPriority metrics.DepthMetricWithPriority + // total number of adds handled by a workqueue + adds workqueue.CounterMetric + // how long an item stays in a workqueue + latency workqueue.HistogramMetric + // how long processing an item from a workqueue takes + workDuration workqueue.HistogramMetric + + mapLock sync.RWMutex + addTimes map[T]time.Time + processingStartTimes map[T]time.Time + + // how long have current threads been working? + unfinishedWorkSeconds workqueue.SettableGaugeMetric + longestRunningProcessor workqueue.SettableGaugeMetric + + retries workqueue.CounterMetric +} + +// add is called for ready items only +func (m *defaultQueueMetrics[T]) add(item T, priority int) { + if m == nil { + return + } + + m.adds.Inc() + if m.depthWithPriority != nil { + m.depthWithPriority.Inc(priority) + } else { + m.depth.Inc() + } + + m.mapLock.Lock() + defer m.mapLock.Unlock() + + if _, exists := m.addTimes[item]; !exists { + m.addTimes[item] = m.clock.Now() + } +} + +func (m *defaultQueueMetrics[T]) get(item T, priority int) { + if m == nil { + return + } + + if m.depthWithPriority != nil { + m.depthWithPriority.Dec(priority) + } else { + m.depth.Dec() + } + + m.mapLock.Lock() + defer m.mapLock.Unlock() + + m.processingStartTimes[item] = m.clock.Now() + if startTime, exists := m.addTimes[item]; exists { + m.latency.Observe(m.sinceInSeconds(startTime)) + delete(m.addTimes, item) + } +} + +func (m *defaultQueueMetrics[T]) updateDepthWithPriorityMetric(oldPriority, newPriority int) { + if m.depthWithPriority != nil { + m.depthWithPriority.Dec(oldPriority) + m.depthWithPriority.Inc(newPriority) + } +} + +func (m *defaultQueueMetrics[T]) done(item T) { + if m == nil { + return + } + + m.mapLock.Lock() + defer m.mapLock.Unlock() + if startTime, exists := m.processingStartTimes[item]; exists { + 
m.workDuration.Observe(m.sinceInSeconds(startTime)) + delete(m.processingStartTimes, item) + } +} + +func (m *defaultQueueMetrics[T]) updateUnfinishedWork() { + m.mapLock.RLock() + defer m.mapLock.RUnlock() + // Note that a summary metric would be better for this, but prometheus + // doesn't seem to have non-hacky ways to reset the summary metrics. + var total float64 + var oldest float64 + for _, t := range m.processingStartTimes { + age := m.sinceInSeconds(t) + total += age + if age > oldest { + oldest = age + } + } + m.unfinishedWorkSeconds.Set(total) + m.longestRunningProcessor.Set(oldest) +} + +// Gets the time since the specified start in seconds. +func (m *defaultQueueMetrics[T]) sinceInSeconds(start time.Time) float64 { + return m.clock.Since(start).Seconds() +} + +func (m *defaultQueueMetrics[T]) retry() { + m.retries.Inc() +} + +type noMetrics[T any] struct{} + +func (noMetrics[T]) add(item T, priority int) {} +func (noMetrics[T]) get(item T, priority int) {} +func (noMetrics[T]) updateDepthWithPriorityMetric(oldPriority, newPriority int) {} +func (noMetrics[T]) done(item T) {} +func (noMetrics[T]) updateUnfinishedWork() {} +func (noMetrics[T]) retry() {} diff --git a/pkg/controller/priorityqueue/metrics_test.go b/pkg/controller/priorityqueue/metrics_test.go new file mode 100644 index 0000000000..3be3989d89 --- /dev/null +++ b/pkg/controller/priorityqueue/metrics_test.go @@ -0,0 +1,141 @@ +package priorityqueue + +import ( + "sync" + + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/internal/metrics" +) + +func newFakeMetricsProvider() *fakeMetricsProvider { + return &fakeMetricsProvider{ + depth: make(map[string]map[int]int), + adds: make(map[string]int), + latency: make(map[string][]float64), + workDuration: make(map[string][]float64), + unfinishedWorkSeconds: make(map[string]float64), + longestRunningProcessor: make(map[string]float64), + retries: make(map[string]int), + mu: sync.Mutex{}, + } +} + +var _ 
metrics.MetricsProviderWithPriority = &fakeMetricsProvider{} + +type fakeMetricsProvider struct { + depth map[string]map[int]int + adds map[string]int + latency map[string][]float64 + workDuration map[string][]float64 + unfinishedWorkSeconds map[string]float64 + longestRunningProcessor map[string]float64 + retries map[string]int + mu sync.Mutex +} + +func (f *fakeMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { + panic("Should never be called. Expected NewDepthMetricWithPriority to be called instead") +} + +func (f *fakeMetricsProvider) NewDepthMetricWithPriority(name string) metrics.DepthMetricWithPriority { + f.mu.Lock() + defer f.mu.Unlock() + f.depth[name] = map[int]int{} + return &fakeGaugeMetric{m: &f.depth, mu: &f.mu, name: name} +} + +func (f *fakeMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { + f.mu.Lock() + defer f.mu.Unlock() + f.adds[name] = 0 + return &fakeCounterMetric{m: &f.adds, mu: &f.mu, name: name} +} + +func (f *fakeMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { + f.mu.Lock() + defer f.mu.Unlock() + f.latency[name] = []float64{} + return &fakeHistogramMetric{m: &f.latency, mu: &f.mu, name: name} +} + +func (f *fakeMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { + f.mu.Lock() + defer f.mu.Unlock() + f.workDuration[name] = []float64{} + return &fakeHistogramMetric{m: &f.workDuration, mu: &f.mu, name: name} +} + +func (f *fakeMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + f.mu.Lock() + defer f.mu.Unlock() + f.unfinishedWorkSeconds[name] = 0 + return &fakeSettableGaugeMetric{m: &f.unfinishedWorkSeconds, mu: &f.mu, name: name} +} + +func (f *fakeMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { + f.mu.Lock() + defer f.mu.Unlock() + f.longestRunningProcessor[name] = 0 + return &fakeSettableGaugeMetric{m: &f.longestRunningProcessor, mu: &f.mu, name: 
name} +} + +func (f *fakeMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { + f.mu.Lock() + defer f.mu.Unlock() + f.retries[name] = 0 + return &fakeCounterMetric{m: &f.retries, mu: &f.mu, name: name} +} + +type fakeGaugeMetric struct { + m *map[string]map[int]int + mu *sync.Mutex + name string +} + +func (fg *fakeGaugeMetric) Inc(priority int) { + fg.mu.Lock() + defer fg.mu.Unlock() + (*fg.m)[fg.name][priority]++ +} + +func (fg *fakeGaugeMetric) Dec(priority int) { + fg.mu.Lock() + defer fg.mu.Unlock() + (*fg.m)[fg.name][priority]-- +} + +type fakeCounterMetric struct { + m *map[string]int + mu *sync.Mutex + name string +} + +func (fc *fakeCounterMetric) Inc() { + fc.mu.Lock() + defer fc.mu.Unlock() + (*fc.m)[fc.name]++ +} + +type fakeHistogramMetric struct { + m *map[string][]float64 + mu *sync.Mutex + name string +} + +func (fh *fakeHistogramMetric) Observe(v float64) { + fh.mu.Lock() + defer fh.mu.Unlock() + (*fh.m)[fh.name] = append((*fh.m)[fh.name], v) +} + +type fakeSettableGaugeMetric struct { + m *map[string]float64 + mu *sync.Mutex + name string +} + +func (fs *fakeSettableGaugeMetric) Set(v float64) { + fs.mu.Lock() + defer fs.mu.Unlock() + (*fs.m)[fs.name] = v +} diff --git a/pkg/controller/priorityqueue/priorityqueue.go b/pkg/controller/priorityqueue/priorityqueue.go new file mode 100644 index 0000000000..98df84c56b --- /dev/null +++ b/pkg/controller/priorityqueue/priorityqueue.go @@ -0,0 +1,460 @@ +package priorityqueue + +import ( + "math" + "sync" + "sync/atomic" + "time" + + "github.com/go-logr/logr" + "github.com/google/btree" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/workqueue" + "k8s.io/utils/clock" + "k8s.io/utils/ptr" + + "sigs.k8s.io/controller-runtime/pkg/internal/metrics" +) + +// AddOpts describes the options for adding items to the queue. +type AddOpts struct { + After time.Duration + RateLimited bool + // Priority is the priority of the item. 
Higher values + // indicate higher priority. + // Defaults to zero if unset. + Priority *int +} + +// PriorityQueue is a priority queue for a controller. It +// internally de-duplicates all items that are added to +// it. It will use the max of the passed priorities and the +// min of possible durations. +type PriorityQueue[T comparable] interface { + workqueue.TypedRateLimitingInterface[T] + AddWithOpts(o AddOpts, Items ...T) + GetWithPriority() (item T, priority int, shutdown bool) +} + +// Opts contains the options for a PriorityQueue. +type Opts[T comparable] struct { + // Ratelimiter is being used when AddRateLimited is called. Defaults to a per-item exponential backoff + // limiter with an initial delay of five milliseconds and a max delay of 1000 seconds. + RateLimiter workqueue.TypedRateLimiter[T] + MetricProvider workqueue.MetricsProvider + Log logr.Logger +} + +// Opt allows to configure a PriorityQueue. +type Opt[T comparable] func(*Opts[T]) + +// New constructs a new PriorityQueue. +func New[T comparable](name string, o ...Opt[T]) PriorityQueue[T] { + opts := &Opts[T]{} + for _, f := range o { + f(opts) + } + + if opts.RateLimiter == nil { + opts.RateLimiter = workqueue.NewTypedItemExponentialFailureRateLimiter[T](5*time.Millisecond, 1000*time.Second) + } + + if opts.MetricProvider == nil { + opts.MetricProvider = metrics.WorkqueueMetricsProvider{} + } + + pq := &priorityqueue[T]{ + log: opts.Log, + items: map[T]*item[T]{}, + queue: btree.NewG(32, less[T]), + becameReady: sets.Set[T]{}, + metrics: newQueueMetrics[T](opts.MetricProvider, name, clock.RealClock{}), + // itemOrWaiterAdded indicates that an item or + // waiter was added. It must be buffered, because + // if we currently process items we can't tell + // if that included the new item/waiter. 
+ itemOrWaiterAdded: make(chan struct{}, 1), + rateLimiter: opts.RateLimiter, + locked: sets.Set[T]{}, + done: make(chan struct{}), + get: make(chan item[T]), + now: time.Now, + tick: time.Tick, + } + + go pq.spin() + go pq.logState() + if _, ok := pq.metrics.(noMetrics[T]); !ok { + go pq.updateUnfinishedWorkLoop() + } + + return pq +} + +type priorityqueue[T comparable] struct { + log logr.Logger + // lock has to be acquired for any access any of items, queue, addedCounter + // or becameReady + lock sync.Mutex + items map[T]*item[T] + queue bTree[*item[T]] + + // addedCounter is a counter of elements added, we need it + // because unixNano is not guaranteed to be unique. + addedCounter uint64 + + // becameReady holds items that are in the queue, were added + // with non-zero after and became ready. We need it to call the + // metrics add exactly once for them. + becameReady sets.Set[T] + metrics queueMetrics[T] + + itemOrWaiterAdded chan struct{} + + rateLimiter workqueue.TypedRateLimiter[T] + + // locked contains the keys we handed out through Get() and that haven't + // yet been returned through Done(). + locked sets.Set[T] + lockedLock sync.RWMutex + + shutdown atomic.Bool + done chan struct{} + + get chan item[T] + + // waiters is the number of routines blocked in Get, we use it to determine + // if we can push items. 
+ waiters atomic.Int64 + + // Configurable for testing + now func() time.Time + tick func(time.Duration) <-chan time.Time +} + +func (w *priorityqueue[T]) AddWithOpts(o AddOpts, items ...T) { + if w.shutdown.Load() { + return + } + + w.lock.Lock() + defer w.lock.Unlock() + + for _, key := range items { + after := o.After + if o.RateLimited { + rlAfter := w.rateLimiter.When(key) + if after == 0 || rlAfter < after { + after = rlAfter + } + } + + var readyAt *time.Time + if after > 0 { + readyAt = ptr.To(w.now().Add(after)) + w.metrics.retry() + } + if _, ok := w.items[key]; !ok { + item := &item[T]{ + Key: key, + AddedCounter: w.addedCounter, + Priority: ptr.Deref(o.Priority, 0), + ReadyAt: readyAt, + } + w.items[key] = item + w.queue.ReplaceOrInsert(item) + if item.ReadyAt == nil { + w.metrics.add(key, item.Priority) + } + w.addedCounter++ + continue + } + + // The b-tree de-duplicates based on ordering and any change here + // will affect the order - Just delete and re-add. + item, _ := w.queue.Delete(w.items[key]) + if newPriority := ptr.Deref(o.Priority, 0); newPriority > item.Priority { + // Update depth metric only if the item in the queue was already added to the depth metric. 
+ if item.ReadyAt == nil || w.becameReady.Has(key) { + w.metrics.updateDepthWithPriorityMetric(item.Priority, newPriority) + } + item.Priority = newPriority + } + + if item.ReadyAt != nil && (readyAt == nil || readyAt.Before(*item.ReadyAt)) { + if readyAt == nil && !w.becameReady.Has(key) { + w.metrics.add(key, item.Priority) + } + item.ReadyAt = readyAt + } + + w.queue.ReplaceOrInsert(item) + } + + if len(items) > 0 { + w.notifyItemOrWaiterAdded() + } +} + +func (w *priorityqueue[T]) notifyItemOrWaiterAdded() { + select { + case w.itemOrWaiterAdded <- struct{}{}: + default: + } +} + +func (w *priorityqueue[T]) spin() { + blockForever := make(chan time.Time) + var nextReady <-chan time.Time + nextReady = blockForever + var nextItemReadyAt time.Time + + for { + select { + case <-w.done: + return + case <-w.itemOrWaiterAdded: + case <-nextReady: + nextReady = blockForever + nextItemReadyAt = time.Time{} + } + + func() { + w.lock.Lock() + defer w.lock.Unlock() + + w.lockedLock.Lock() + defer w.lockedLock.Unlock() + + // manipulating the tree from within Ascend might lead to panics, so + // track what we want to delete and do it after we are done ascending. + var toDelete []*item[T] + + var key T + + // Items in the queue tree are sorted first by priority and second by readiness, so + // items with a lower priority might be ready further down in the queue. 
+ // We iterate through the priorities high to low until we find a ready item + pivot := item[T]{ + Key: key, + AddedCounter: 0, + Priority: math.MaxInt, + ReadyAt: nil, + } + + for { + pivotChange := false + + w.queue.AscendGreaterOrEqual(&pivot, func(item *item[T]) bool { + // Item is locked, we can not hand it out + if w.locked.Has(item.Key) { + return true + } + + if item.ReadyAt != nil { + if readyAt := item.ReadyAt.Sub(w.now()); readyAt > 0 { + if nextItemReadyAt.After(*item.ReadyAt) || nextItemReadyAt.IsZero() { + nextReady = w.tick(readyAt) + nextItemReadyAt = *item.ReadyAt + } + + // Adjusting the pivot item moves the ascend to the next lower priority + pivot.Priority = item.Priority - 1 + pivotChange = true + return false + } + if !w.becameReady.Has(item.Key) { + w.metrics.add(item.Key, item.Priority) + w.becameReady.Insert(item.Key) + } + } + + if w.waiters.Load() == 0 { + // Have to keep iterating here to ensure we update metrics + // for further items that became ready and set nextReady. + return true + } + + w.metrics.get(item.Key, item.Priority) + w.locked.Insert(item.Key) + w.waiters.Add(-1) + delete(w.items, item.Key) + toDelete = append(toDelete, item) + w.becameReady.Delete(item.Key) + w.get <- *item + + return true + }) + + if !pivotChange { + break + } + } + + for _, item := range toDelete { + w.queue.Delete(item) + } + }() + } +} + +func (w *priorityqueue[T]) Add(item T) { + w.AddWithOpts(AddOpts{}, item) +} + +func (w *priorityqueue[T]) AddAfter(item T, after time.Duration) { + w.AddWithOpts(AddOpts{After: after}, item) +} + +func (w *priorityqueue[T]) AddRateLimited(item T) { + w.AddWithOpts(AddOpts{RateLimited: true}, item) +} + +func (w *priorityqueue[T]) GetWithPriority() (_ T, priority int, shutdown bool) { + if w.shutdown.Load() { + var zero T + return zero, 0, true + } + + w.waiters.Add(1) + + w.notifyItemOrWaiterAdded() + + select { + case <-w.done: + // Return if the queue was shutdown while we were already waiting for an item here. 
+ // For example controller workers are continuously calling GetWithPriority and + // GetWithPriority is blocking the workers if there are no items in the queue. + // If the controller and accordingly the queue is then shut down, without this code + // branch the controller workers remain blocked here and are unable to shut down. + var zero T + return zero, 0, true + case item := <-w.get: + return item.Key, item.Priority, w.shutdown.Load() + } +} + +func (w *priorityqueue[T]) Get() (item T, shutdown bool) { + key, _, shutdown := w.GetWithPriority() + return key, shutdown +} + +func (w *priorityqueue[T]) Forget(item T) { + w.rateLimiter.Forget(item) +} + +func (w *priorityqueue[T]) NumRequeues(item T) int { + return w.rateLimiter.NumRequeues(item) +} + +func (w *priorityqueue[T]) ShuttingDown() bool { + return w.shutdown.Load() +} + +func (w *priorityqueue[T]) Done(item T) { + w.lockedLock.Lock() + defer w.lockedLock.Unlock() + w.locked.Delete(item) + w.metrics.done(item) + w.notifyItemOrWaiterAdded() +} + +func (w *priorityqueue[T]) ShutDown() { + w.shutdown.Store(true) + close(w.done) +} + +// ShutDownWithDrain just calls ShutDown, as the draining +// functionality is not used by controller-runtime. +func (w *priorityqueue[T]) ShutDownWithDrain() { + w.ShutDown() +} + +// Len returns the number of items that are ready to be +// picked up. It does not include items that are not yet +// ready. +func (w *priorityqueue[T]) Len() int { + w.lock.Lock() + defer w.lock.Unlock() + + var result int + w.queue.Ascend(func(item *item[T]) bool { + if item.ReadyAt == nil || item.ReadyAt.Compare(w.now()) <= 0 { + result++ + return true + } + return false + }) + + return result +} + +func (w *priorityqueue[T]) logState() { + t := time.Tick(10 * time.Second) + for { + select { + case <-w.done: + return + case <-t: + } + + // Log level may change at runtime, so keep the + // loop going even if a given level is currently + // not enabled. 
+ if !w.log.V(5).Enabled() { + continue + } + w.lock.Lock() + items := make([]*item[T], 0, len(w.items)) + w.queue.Ascend(func(item *item[T]) bool { + items = append(items, item) + return true + }) + w.lock.Unlock() + + w.log.V(5).Info("workqueue_items", "items", items) + } +} + +func less[T comparable](a, b *item[T]) bool { + if a.Priority != b.Priority { + return a.Priority > b.Priority + } + if a.ReadyAt == nil && b.ReadyAt != nil { + return true + } + if b.ReadyAt == nil && a.ReadyAt != nil { + return false + } + if a.ReadyAt != nil && b.ReadyAt != nil && !a.ReadyAt.Equal(*b.ReadyAt) { + return a.ReadyAt.Before(*b.ReadyAt) + } + + return a.AddedCounter < b.AddedCounter +} + +type item[T comparable] struct { + Key T `json:"key"` + AddedCounter uint64 `json:"addedCounter"` + Priority int `json:"priority"` + ReadyAt *time.Time `json:"readyAt,omitempty"` +} + +func (w *priorityqueue[T]) updateUnfinishedWorkLoop() { + t := time.Tick(500 * time.Millisecond) // borrowed from workqueue: https://github.com/kubernetes/kubernetes/blob/67a807bf142c7a2a5ecfdb2a5d24b4cdea4cc79c/staging/src/k8s.io/client-go/util/workqueue/queue.go#L182 + for { + select { + case <-w.done: + return + case <-t: + } + w.metrics.updateUnfinishedWork() + } +} + +type bTree[T any] interface { + ReplaceOrInsert(item T) (_ T, _ bool) + Delete(item T) (T, bool) + Ascend(iterator btree.ItemIteratorG[T]) + AscendGreaterOrEqual(pivot T, iterator btree.ItemIteratorG[T]) +} diff --git a/pkg/controller/priorityqueue/priorityqueue_suite_test.go b/pkg/controller/priorityqueue/priorityqueue_suite_test.go new file mode 100644 index 0000000000..71bc5ba049 --- /dev/null +++ b/pkg/controller/priorityqueue/priorityqueue_suite_test.go @@ -0,0 +1,13 @@ +package priorityqueue + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestControllerWorkqueue(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "ControllerWorkqueue Suite") +} diff --git a/pkg/controller/priorityqueue/priorityqueue_test.go b/pkg/controller/priorityqueue/priorityqueue_test.go new file mode 100644 index 0000000000..e18a6393eb --- /dev/null +++ b/pkg/controller/priorityqueue/priorityqueue_test.go @@ -0,0 +1,775 @@ +package priorityqueue + +import ( + "fmt" + "math/rand/v2" + "sync" + "testing" + "time" + + fuzz "github.com/google/gofuzz" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" +) + +var _ = Describe("Controllerworkqueue", func() { + It("returns an item", func() { + q, metrics := newQueue() + defer q.ShutDown() + q.AddWithOpts(AddOpts{}, "foo") + + item, _, _ := q.GetWithPriority() + Expect(item).To(Equal("foo")) + + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 0})) + Expect(metrics.adds["test"]).To(Equal(1)) + Expect(metrics.retries["test"]).To(Equal(0)) + }) + + It("returns items in order", func() { + q, metrics := newQueue() + defer q.ShutDown() + + q.AddWithOpts(AddOpts{}, "foo") + q.AddWithOpts(AddOpts{}, "bar") + + item, _, _ := q.GetWithPriority() + Expect(item).To(Equal("foo")) + item, _, _ = q.GetWithPriority() + Expect(item).To(Equal("bar")) + + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 0})) + Expect(metrics.adds["test"]).To(Equal(2)) + }) + + It("doesn't return an item that is currently locked", func() { + q, metrics := newQueue() + defer q.ShutDown() + + q.AddWithOpts(AddOpts{}, "foo") + + item, _, _ := q.GetWithPriority() + Expect(item).To(Equal("foo")) + + q.AddWithOpts(AddOpts{}, "foo") + q.AddWithOpts(AddOpts{}, "bar") + item, _, _ = q.GetWithPriority() + Expect(item).To(Equal("bar")) + + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 1})) + 
Expect(metrics.adds["test"]).To(Equal(3)) + }) + + It("returns an item as soon as its unlocked", func() { + q, metrics := newQueue() + defer q.ShutDown() + + q.AddWithOpts(AddOpts{}, "foo") + + item, _, _ := q.GetWithPriority() + Expect(item).To(Equal("foo")) + + q.AddWithOpts(AddOpts{}, "foo") + q.AddWithOpts(AddOpts{}, "bar") + item, _, _ = q.GetWithPriority() + Expect(item).To(Equal("bar")) + + q.AddWithOpts(AddOpts{}, "baz") + q.Done("foo") + item, _, _ = q.GetWithPriority() + Expect(item).To(Equal("foo")) + + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 1})) + Expect(metrics.adds["test"]).To(Equal(4)) + }) + + It("de-duplicates items", func() { + q, metrics := newQueue() + defer q.ShutDown() + + q.AddWithOpts(AddOpts{}, "foo") + q.AddWithOpts(AddOpts{}, "foo") + + Expect(q.Len()).To(Equal(1)) + + q.lockedLock.Lock() + Expect(q.locked.Len()).To(Equal(0)) + + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 1})) + Expect(metrics.adds["test"]).To(Equal(1)) + }) + + It("retains the highest priority", func() { + q, metrics := newQueue() + defer q.ShutDown() + + q.AddWithOpts(AddOpts{Priority: ptr.To(1)}, "foo") + q.AddWithOpts(AddOpts{Priority: ptr.To(2)}, "foo") + + item, priority, _ := q.GetWithPriority() + Expect(item).To(Equal("foo")) + Expect(priority).To(Equal(2)) + + Expect(q.Len()).To(Equal(0)) + + Expect(metrics.depth["test"]).To(Equal(map[int]int{1: 0, 2: 0})) + Expect(metrics.adds["test"]).To(Equal(1)) + }) + + It("gets pushed to the front if the priority increases", func() { + q, metrics := newQueue() + defer q.ShutDown() + + q.AddWithOpts(AddOpts{}, "foo") + q.AddWithOpts(AddOpts{}, "bar") + q.AddWithOpts(AddOpts{}, "baz") + q.AddWithOpts(AddOpts{Priority: ptr.To(1)}, "baz") + + item, priority, _ := q.GetWithPriority() + Expect(item).To(Equal("baz")) + Expect(priority).To(Equal(1)) + + Expect(q.Len()).To(Equal(2)) + + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 2, 1: 0})) + Expect(metrics.adds["test"]).To(Equal(3)) + }) + + 
It("retains the lowest after duration", func() { + q, metrics := newQueue() + defer q.ShutDown() + + q.AddWithOpts(AddOpts{After: 0}, "foo") + q.AddWithOpts(AddOpts{After: time.Hour}, "foo") + + item, priority, _ := q.GetWithPriority() + Expect(item).To(Equal("foo")) + Expect(priority).To(Equal(0)) + + Expect(q.Len()).To(Equal(0)) + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 0})) + Expect(metrics.adds["test"]).To(Equal(1)) + }) + + It("returns an item only after after has passed", func() { + q, metrics, forwardQueueTimeBy := newQueueWithTimeForwarder() + defer q.ShutDown() + + originalTick := q.tick + q.tick = func(d time.Duration) <-chan time.Time { + Expect(d).To(Equal(time.Second)) + return originalTick(d) + } + + retrievedItem := make(chan struct{}) + + go func() { + defer GinkgoRecover() + q.GetWithPriority() + close(retrievedItem) + }() + + q.AddWithOpts(AddOpts{After: time.Second}, "foo") + + Consistently(retrievedItem).ShouldNot(BeClosed()) + + forwardQueueTimeBy(time.Second) + Eventually(retrievedItem).Should(BeClosed()) + + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 0})) + Expect(metrics.adds["test"]).To(Equal(1)) + Expect(metrics.retries["test"]).To(Equal(1)) + }) + + It("returns high priority item that became ready before low priority item", func() { + q, metrics, forwardQueueTimeBy := newQueueWithTimeForwarder() + defer q.ShutDown() + + tickSetup := make(chan any) + originalTick := q.tick + q.tick = func(d time.Duration) <-chan time.Time { + Expect(d).To(Equal(time.Second)) + close(tickSetup) + return originalTick(d) + } + + lowPriority := -100 + highPriority := 0 + q.AddWithOpts(AddOpts{After: 0, Priority: &lowPriority}, "foo") + q.AddWithOpts(AddOpts{After: time.Second, Priority: &highPriority}, "prio") + + Eventually(tickSetup).Should(BeClosed()) + + forwardQueueTimeBy(1 * time.Second) + key, prio, _ := q.GetWithPriority() + + Expect(key).To(Equal("prio")) + Expect(prio).To(Equal(0)) + 
Expect(metrics.depth["test"]).To(Equal(map[int]int{-100: 1, 0: 0})) + Expect(metrics.adds["test"]).To(Equal(2)) + Expect(metrics.retries["test"]).To(Equal(1)) + }) + + It("returns an item to a waiter as soon as it has one", func() { + q, metrics := newQueue() + defer q.ShutDown() + + retrieved := make(chan struct{}) + go func() { + defer GinkgoRecover() + item, _, _ := q.GetWithPriority() + Expect(item).To(Equal("foo")) + close(retrieved) + }() + + // We are waiting for the GetWithPriority() call to be blocked + // on retrieving an item. As golang doesn't provide a way to + // check if something is listening on a channel without + // sending them a message, I can't think of a way to do this + // without sleeping. + time.Sleep(time.Second) + q.AddWithOpts(AddOpts{}, "foo") + Eventually(retrieved).Should(BeClosed()) + + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 0})) + Expect(metrics.adds["test"]).To(Equal(1)) + }) + + It("returns multiple items with after in correct order", func() { + q, metrics, forwardQueueTimeBy := newQueueWithTimeForwarder() + defer q.ShutDown() + + originalTick := q.tick + q.tick = func(d time.Duration) <-chan time.Time { + // What a bunch of bs. Deferring in here causes + // ginkgo to deadlock, presumably because it + // never returns after the defer. Not deferring + // hides the actual assertion result and makes + // it complain that there should be a defer. + // Move the assertion into a goroutine just to + // get around that mess. + done := make(chan struct{}) + go func() { + defer GinkgoRecover() + defer close(done) + + // This is not deterministic and depends on which of + // Add() or Spin() gets the lock first. 
+ Expect(d).To(Or(Equal(200*time.Millisecond), Equal(time.Second))) + }() + <-done + return originalTick(d) + } + + retrievedItem := make(chan struct{}) + retrievedSecondItem := make(chan struct{}) + + go func() { + defer GinkgoRecover() + first, _, _ := q.GetWithPriority() + Expect(first).To(Equal("bar")) + close(retrievedItem) + + second, _, _ := q.GetWithPriority() + Expect(second).To(Equal("foo")) + close(retrievedSecondItem) + }() + + q.AddWithOpts(AddOpts{After: time.Second}, "foo") + q.AddWithOpts(AddOpts{After: 200 * time.Millisecond}, "bar") + + Consistently(retrievedItem).ShouldNot(BeClosed()) + + forwardQueueTimeBy(time.Second) + Eventually(retrievedItem).Should(BeClosed()) + Eventually(retrievedSecondItem).Should(BeClosed()) + + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 0})) + Expect(metrics.adds["test"]).To(Equal(2)) + }) + + It("doesn't include non-ready items in Len()", func() { + q, metrics := newQueue() + defer q.ShutDown() + + q.AddWithOpts(AddOpts{After: time.Minute}, "foo") + q.AddWithOpts(AddOpts{}, "baz") + q.AddWithOpts(AddOpts{After: time.Minute}, "bar") + q.AddWithOpts(AddOpts{}, "bal") + + Expect(q.Len()).To(Equal(2)) + Expect(metrics.depth).To(HaveLen(1)) + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 2})) + }) + + // ref: https://github.com/kubernetes-sigs/controller-runtime/issues/3239 + It("Get from priority queue might get stuck when the priority queue is shut down", func() { + q, _ := newQueue() + + q.Add("baz") + // shut down + q.ShutDown() + q.AddWithOpts(AddOpts{After: time.Second}, "foo") + + item, priority, isShutDown := q.GetWithPriority() + Expect(item).To(Equal("")) + Expect(priority).To(Equal(0)) + Expect(isShutDown).To(BeTrue()) + + item1, priority1, isShutDown := q.GetWithPriority() + Expect(item1).To(Equal("")) + Expect(priority1).To(Equal(0)) + Expect(isShutDown).To(BeTrue()) + }) + + It("Get from priority queue should get unblocked when the priority queue is shut down", func() { + q, _ 
:= newQueue() + + getUnblocked := make(chan struct{}) + + go func() { + defer GinkgoRecover() + defer close(getUnblocked) + + item, priority, isShutDown := q.GetWithPriority() + Expect(item).To(Equal("")) + Expect(priority).To(Equal(0)) + Expect(isShutDown).To(BeTrue()) + }() + + // Verify the go routine above is now waiting for an item. + Eventually(q.waiters.Load).Should(Equal(int64(1))) + Consistently(getUnblocked).ShouldNot(BeClosed()) + + // shut down + q.ShutDown() + + // Verify the shutdown unblocked the go routine. + Eventually(getUnblocked).Should(BeClosed()) + }) + + It("items are included in Len() and the queueDepth metric once they are ready", func() { + q, metrics := newQueue() + defer q.ShutDown() + + q.AddWithOpts(AddOpts{After: 500 * time.Millisecond}, "foo") + q.AddWithOpts(AddOpts{}, "baz") + q.AddWithOpts(AddOpts{After: 500 * time.Millisecond}, "bar") + q.AddWithOpts(AddOpts{}, "bal") + + Expect(q.Len()).To(Equal(2)) + metrics.mu.Lock() + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 2})) + metrics.mu.Unlock() + time.Sleep(time.Second) + Expect(q.Len()).To(Equal(4)) + metrics.mu.Lock() + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 4})) + metrics.mu.Unlock() + + // Drain queue + for range 4 { + item, _ := q.Get() + q.Done(item) + } + Expect(q.Len()).To(Equal(0)) + metrics.mu.Lock() + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 0})) + metrics.mu.Unlock() + + // Validate that doing it again still works to notice bugs with removing + // it from the queues becameReady tracking. 
+ q.AddWithOpts(AddOpts{After: 500 * time.Millisecond}, "foo") + q.AddWithOpts(AddOpts{}, "baz") + q.AddWithOpts(AddOpts{After: 500 * time.Millisecond}, "bar") + q.AddWithOpts(AddOpts{}, "bal") + + Expect(q.Len()).To(Equal(2)) + metrics.mu.Lock() + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 2})) + metrics.mu.Unlock() + time.Sleep(time.Second) + Expect(q.Len()).To(Equal(4)) + metrics.mu.Lock() + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 4})) + metrics.mu.Unlock() + }) + + It("returns many items", func() { + // This test ensures the queue is able to drain a large queue without panic'ing. + // In a previous version of the code we were calling queue.Delete within q.Ascend + // which led to a panic in queue.Ascend > iterate: + // "panic: runtime error: index out of range [0] with length 0" + q, _ := newQueue() + defer q.ShutDown() + + for range 20 { + for i := range 1000 { + rn := rand.N(100) + if rn < 10 { + q.AddWithOpts(AddOpts{After: time.Duration(rn) * time.Millisecond}, fmt.Sprintf("foo%d", i)) + } else { + q.AddWithOpts(AddOpts{Priority: &rn}, fmt.Sprintf("foo%d", i)) + } + } + + wg := sync.WaitGroup{} + for range 100 { // The panic only occurred relatively frequently with a high number of go routines. 
+ wg.Add(1) + go func() { + defer wg.Done() + for range 10 { + obj, _, _ := q.GetWithPriority() + q.Done(obj) + } + }() + } + + wg.Wait() + } + }) + + It("updates metrics correctly for an item that gets initially added with after and then without", func() { + q, metrics := newQueue() + defer q.ShutDown() + + q.AddWithOpts(AddOpts{After: time.Hour}, "foo") + Expect(q.Len()).To(Equal(0)) + metrics.mu.Lock() + Expect(metrics.depth["test"]).To(Equal(map[int]int{})) + metrics.mu.Unlock() + + q.AddWithOpts(AddOpts{}, "foo") + + Expect(q.Len()).To(Equal(1)) + metrics.mu.Lock() + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 1})) + metrics.mu.Unlock() + + // Get the item to ensure the codepath in + // `spin` for the metrics is passed by so + // that this starts failing if it incorrectly + // calls `metrics.add` again. + item, _ := q.Get() + Expect(item).To(Equal("foo")) + Expect(q.Len()).To(Equal(0)) + metrics.mu.Lock() + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 0})) + metrics.mu.Unlock() + }) + + It("Updates metrics correctly for an item whose requeueAfter expired that gets added again without requeueAfter", func() { + q, metrics := newQueue() + defer q.ShutDown() + + q.AddWithOpts(AddOpts{After: 50 * time.Millisecond}, "foo") + time.Sleep(100 * time.Millisecond) + + Expect(q.Len()).To(Equal(1)) + metrics.mu.Lock() + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 1})) + metrics.mu.Unlock() + + q.AddWithOpts(AddOpts{}, "foo") + Expect(q.Len()).To(Equal(1)) + metrics.mu.Lock() + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 1})) + metrics.mu.Unlock() + + // Get the item to ensure the codepath in + // `spin` for the metrics is passed by so + // that this starts failing if it incorrectly + // calls `metrics.add` again. 
+ item, _ := q.Get() + Expect(item).To(Equal("foo")) + Expect(q.Len()).To(Equal(0)) + metrics.mu.Lock() + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 0})) + metrics.mu.Unlock() + }) + + It("When adding items with rateLimit, previous items' rateLimit should not affect subsequent items", func() { + q, metrics, forwardQueueTimeBy := newQueueWithTimeForwarder() + defer q.ShutDown() + + q.rateLimiter = workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, 1000*time.Second) + originalTick := q.tick + q.tick = func(d time.Duration) <-chan time.Time { + done := make(chan struct{}) + go func() { + defer GinkgoRecover() + defer close(done) + + Expect(d).To(Or(Equal(5*time.Millisecond), Equal(635*time.Millisecond))) + }() + <-done + return originalTick(d) + } + + retrievedItem := make(chan struct{}) + retrievedSecondItem := make(chan struct{}) + + go func() { + defer GinkgoRecover() + first, _, _ := q.GetWithPriority() + Expect(first).To(Equal("foo")) + close(retrievedItem) + + second, _, _ := q.GetWithPriority() + Expect(second).To(Equal("bar")) + close(retrievedSecondItem) + }() + + // after 7 calls, the next When("bar") call will return 640ms. 
+ for range 7 { + q.rateLimiter.When("bar") + } + q.AddWithOpts(AddOpts{RateLimited: true}, "foo", "bar") + + Consistently(retrievedItem).ShouldNot(BeClosed()) + forwardQueueTimeBy(5 * time.Millisecond) + Eventually(retrievedItem).Should(BeClosed()) + + Consistently(retrievedSecondItem).ShouldNot(BeClosed()) + forwardQueueTimeBy(635 * time.Millisecond) + Eventually(retrievedSecondItem).Should(BeClosed()) + + Expect(metrics.depth["test"]).To(Equal(map[int]int{0: 0})) + Expect(metrics.adds["test"]).To(Equal(2)) + Expect(metrics.retries["test"]).To(Equal(2)) + }) +}) + +func BenchmarkAddGetDone(b *testing.B) { + q := New[int]("") + defer q.ShutDown() + b.ResetTimer() + for n := 0; n < b.N; n++ { + for i := 0; i < 1000; i++ { + q.Add(i) + } + for range 1000 { + item, _ := q.Get() + q.Done(item) + } + } +} + +func BenchmarkAddOnly(b *testing.B) { + q := New[int]("") + defer q.ShutDown() + b.ResetTimer() + for n := 0; n < b.N; n++ { + for i := 0; i < 1000; i++ { + q.Add(i) + } + } +} + +func BenchmarkAddLockContended(b *testing.B) { + q := New[int]("") + defer q.ShutDown() + go func() { + for range 1000 { + item, _ := q.Get() + q.Done(item) + } + }() + b.ResetTimer() + for n := 0; n < b.N; n++ { + for i := 0; i < 1000; i++ { + q.Add(i) + } + } +} + +// TestFuzzPrioriorityQueue validates a set of basic +// invariants that should always be true: +// +// - The queue is threadsafe when multiple producers and consumers +// are involved +// - There are no deadlocks +// - An item is never handed out again before it is returned +// - Items in the queue are de-duplicated +// - max(existing priority, new priority) is used +func TestFuzzPriorityQueue(t *testing.T) { + t.Parallel() + + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + f := fuzz.NewWithSeed(seed) + fuzzLock := sync.Mutex{} + fuzz := func(in any) { + fuzzLock.Lock() + defer fuzzLock.Unlock() + + f.Fuzz(in) + } + + inQueue := map[string]int{} + inQueueLock := sync.Mutex{} + + handedOut := sets.Set[string]{} + 
handedOutLock := sync.Mutex{} + + wg := sync.WaitGroup{} + q, metrics := newQueue() + + for range 10 { + wg.Add(1) + go func() { + defer wg.Done() + + for range 1000 { + opts, item := AddOpts{}, "" + + fuzz(&opts) + fuzz(&item) + + if opts.After > 100*time.Millisecond { + opts.After = 10 * time.Millisecond + } + opts.RateLimited = false + + func() { + inQueueLock.Lock() + defer inQueueLock.Unlock() + + q.AddWithOpts(opts, item) + if existingPriority, exists := inQueue[item]; !exists || existingPriority < ptr.Deref(opts.Priority, 0) { + inQueue[item] = ptr.Deref(opts.Priority, 0) + } + }() + } + }() + } + + for range 100 { + wg.Add(1) + + go func() { + defer wg.Done() + + for { + item, cont := func() (string, bool) { + inQueueLock.Lock() + defer inQueueLock.Unlock() + + if len(inQueue) == 0 { + return "", false + } + + item, priority, _ := q.GetWithPriority() + if expected := inQueue[item]; expected != priority { + t.Errorf("got priority %d, expected %d", priority, expected) + } + delete(inQueue, item) + return item, true + }() + + if !cont { + return + } + + func() { + handedOutLock.Lock() + defer handedOutLock.Unlock() + + if handedOut.Has(item) { + t.Errorf("item %s got handed out more than once", item) + } + + metrics.mu.Lock() + for priority, depth := range metrics.depth["test"] { + if depth < 0 { + t.Errorf("negative depth of %d for priority %d:", depth, priority) + } + } + + metrics.mu.Unlock() + handedOut.Insert(item) + }() + + func() { + handedOutLock.Lock() + defer handedOutLock.Unlock() + + handedOut.Delete(item) + q.Done(item) + }() + } + }() + } + + wg.Wait() +} + +func newQueueWithTimeForwarder() (_ *priorityqueue[string], _ *fakeMetricsProvider, forwardQueueTime func(time.Duration)) { + q, m := newQueue() + + now := time.Now().Round(time.Second) + nowLock := sync.Mutex{} + tick := make(chan time.Time) + + q.now = func() time.Time { + nowLock.Lock() + defer nowLock.Unlock() + return now + } + q.tick = func(d time.Duration) <-chan time.Time { + return 
tick + } + + return q, m, func(d time.Duration) { + nowLock.Lock() + now = now.Add(d) + nowLock.Unlock() + tick <- now + } +} + +func newQueue() (*priorityqueue[string], *fakeMetricsProvider) { + metrics := newFakeMetricsProvider() + q := New("test", func(o *Opts[string]) { + o.MetricProvider = metrics + }) + q.(*priorityqueue[string]).queue = &btreeInteractionValidator{ + bTree: q.(*priorityqueue[string]).queue, + } + + // validate that tick always gets a positive value as it will just return + // nil otherwise, which results in blocking forever. + upstreamTick := q.(*priorityqueue[string]).tick + q.(*priorityqueue[string]).tick = func(d time.Duration) <-chan time.Time { + if d <= 0 { + panic(fmt.Sprintf("got non-positive tick: %v", d)) + } + return upstreamTick(d) + } + return q.(*priorityqueue[string]), metrics +} + +type btreeInteractionValidator struct { + bTree[*item[string]] +} + +func (b *btreeInteractionValidator) ReplaceOrInsert(item *item[string]) (*item[string], bool) { + // There is no codepath that updates an item + item, alreadyExist := b.bTree.ReplaceOrInsert(item) + if alreadyExist { + panic(fmt.Sprintf("ReplaceOrInsert: item %v already existed", item)) + } + return item, alreadyExist +} + +func (b *btreeInteractionValidator) Delete(item *item[string]) (*item[string], bool) { + // There is no codepath that deletes an item that doesn't exist + old, existed := b.bTree.Delete(item) + if !existed { + panic(fmt.Sprintf("Delete: item %v not found", item)) + } + return old, existed +} diff --git a/pkg/controller/testdata/crds/unconventionallisttype.yaml b/pkg/controller/testdata/crds/unconventionallisttype.yaml new file mode 100644 index 0000000000..3069c473e5 --- /dev/null +++ b/pkg/controller/testdata/crds/unconventionallisttype.yaml @@ -0,0 +1,17 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: unconventionallisttypes.chaosapps.metamagical.io +spec: + group: chaosapps.metamagical.io + names: + kind: 
UnconventionalListType + plural: unconventionallisttypes + scope: Namespaced + versions: + - name: "v1" + storage: true + served: true + schema: + openAPIV3Schema: + type: object diff --git a/pkg/doc.go b/pkg/doc.go index d573953945..64693b4829 100644 --- a/pkg/doc.go +++ b/pkg/doc.go @@ -18,21 +18,21 @@ limitations under the License. Package pkg provides libraries for building Controllers. Controllers implement Kubernetes APIs and are foundational to building Operators, Workload APIs, Configuration APIs, Autoscalers, and more. -Client +# Client Client provides a Read + Write client for reading and writing Kubernetes objects. -Cache +# Cache Cache provides a Read client for reading objects from a local cache. A cache may register handlers to respond to events that update the cache. -Manager +# Manager Manager is required for creating a Controller and provides the Controller shared dependencies such as clients, caches, schemes, etc. Controllers should be Started through the Manager by calling Manager.Start. -Controller +# Controller Controller implements a Kubernetes API by responding to events (object Create, Update, Delete) and ensuring that the state specified in the Spec of the object matches the state of the system. This is called a reconcile. @@ -49,7 +49,7 @@ system must be read for each reconcile. * Controllers require Watches to be configured to enqueue reconcile.Requests in response to events. -Webhook +# Webhook Admission Webhooks are a mechanism for extending kubernetes APIs. Webhooks can be configured with target event type (object Create, Update, Delete), the API server will send AdmissionRequests to them @@ -62,7 +62,7 @@ Validating webhook is used to validate if an object meets certain requirements. * Admission Webhooks require Handler(s) to be provided to process the received AdmissionReview requests. -Reconciler +# Reconciler Reconciler is a function provided to a Controller that may be called at anytime with the Name and Namespace of an object. 
When called, the Reconciler will ensure that the state of the system matches what is specified in the object at the @@ -84,7 +84,7 @@ a mapping (e.g. owner references) that maps the object that triggers the reconci - e.g. it doesn't matter whether a ReplicaSet was created or updated, Reconciler will always compare the number of Pods in the system against what is specified in the object at the time it is called. -Source +# Source resource.Source is an argument to Controller.Watch that provides a stream of events. Events typically come from watching Kubernetes APIs (e.g. Pod Create, Update, Delete). @@ -97,9 +97,9 @@ through the Watch API. * Users SHOULD only use the provided Source implementations instead of implementing their own for nearly all cases. -EventHandler +# EventHandler -handler.EventHandler is a argument to Controller.Watch that enqueues reconcile.Requests in response to events. +handler.EventHandler is an argument to Controller.Watch that enqueues reconcile.Requests in response to events. Example: a Pod Create event from a Source is provided to the eventhandler.EnqueueHandler, which enqueues a reconcile.Request containing the name / Namespace of the Pod. @@ -117,7 +117,7 @@ type - e.g. map a Node event to objects that respond to cluster resize events. * Users SHOULD only use the provided EventHandler implementations instead of implementing their own for almost all cases. -Predicate +# Predicate predicate.Predicate is an optional argument to Controller.Watch that filters events. This allows common filters to be reused and composed. @@ -129,7 +129,7 @@ reused and composed. * Users SHOULD use the provided Predicate implementations, but MAY implement additional Predicates e.g. generation changed, label selectors changed etc. 
-PodController Diagram +# PodController Diagram Source provides event: @@ -137,20 +137,20 @@ Source provides event: EventHandler enqueues Request: -* &handler.EnqueueRequestForObject{} -> (reconcile.Request{types.NamespaceName{Name: "foo", Namespace: "bar"}}) +* &handler.EnqueueRequestForObject{} -> (reconcile.Request{types.NamespaceName{Namespace: "foo", Name: "bar"}}) Reconciler is called with the Request: -* Reconciler(reconcile.Request{types.NamespaceName{Name: "foo", Namespace: "bar"}}) +* Reconciler(reconcile.Request{types.NamespaceName{Namespace: "foo", Name: "bar"}}) -Usage +# Usage The following example shows creating a new Controller program which Reconciles ReplicaSet objects in response to Pod or ReplicaSet events. The Reconciler function simply adds a label to the ReplicaSet. -See the example/main.go for a usage example. +See the examples/builtins/main.go for a usage example. -Controller Example +Controller Example: 1. Watch ReplicaSet and Pods Sources @@ -159,7 +159,7 @@ Controller Example 1.2 Pod (created by ReplicaSet) -> handler.EnqueueRequestForOwnerHandler - enqueue a Request with the Owning ReplicaSet Namespace and Name. -2. reconcile ReplicaSet in response to an event +2. Reconcile ReplicaSet in response to an event 2.1 ReplicaSet object created -> Read ReplicaSet, try to read Pods -> if is missing create Pods. @@ -167,11 +167,11 @@ Owning ReplicaSet Namespace and Name. 2.3 Reconciler triggered by deletion of Pods from some other actor -> Read ReplicaSet and Pods, create replacement Pods. -Watching and EventHandling +# Watching and EventHandling Controllers may Watch multiple Kinds of objects (e.g. Pods, ReplicaSets and Deployments), but they reconcile only a single Type. When one Type of object must be updated in response to changes in another Type of object, -an EnqueueRequestFromMapFunc may be used to map events from one type to another. e.g. 
Respond to a cluster resize +an EnqueueRequestsFromMapFunc may be used to map events from one type to another. e.g. Respond to a cluster resize event (add / delete Node) by re-reconciling all instances of some API. A Deployment Controller might use an EnqueueRequestForObject and EnqueueRequestForOwner to: @@ -185,7 +185,7 @@ Note: reconcile.Requests are deduplicated when they are enqueued. Many Pod Even may trigger only 1 reconcile invocation as each Event results in the Handler trying to enqueue the same reconcile.Request for the ReplicaSet. -Controller Writing Tips +# Controller Writing Tips Reconciler Runtime Complexity: diff --git a/pkg/envtest/binaries.go b/pkg/envtest/binaries.go new file mode 100644 index 0000000000..5110d32658 --- /dev/null +++ b/pkg/envtest/binaries.go @@ -0,0 +1,387 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "context" + "crypto/sha512" + "encoding/hex" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "runtime" + "strings" + + "k8s.io/apimachinery/pkg/util/version" + "sigs.k8s.io/yaml" +) + +// DefaultBinaryAssetsIndexURL is the default index used in HTTPClient. 
+var DefaultBinaryAssetsIndexURL = "https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/HEAD/envtest-releases.yaml" + +// SetupEnvtestDefaultBinaryAssetsDirectory returns the default location that setup-envtest uses to store envtest binaries. +// Setting BinaryAssetsDirectory to this directory allows sharing envtest binaries with setup-envtest. +// +// The directory is dependent on operating system: +// +// - Windows: %LocalAppData%\kubebuilder-envtest +// - OSX: ~/Library/Application Support/io.kubebuilder.envtest +// - Others: ${XDG_DATA_HOME:-~/.local/share}/kubebuilder-envtest +// +// Otherwise, it errors out. Note that these paths must not be relied upon +// manually. +func SetupEnvtestDefaultBinaryAssetsDirectory() (string, error) { + var baseDir string + + // find the base data directory + switch runtime.GOOS { + case "windows": + baseDir = os.Getenv("LocalAppData") + if baseDir == "" { + return "", errors.New("%LocalAppData% is not defined") + } + case "darwin": + homeDir := os.Getenv("HOME") + if homeDir == "" { + return "", errors.New("$HOME is not defined") + } + baseDir = filepath.Join(homeDir, "Library/Application Support") + default: + baseDir = os.Getenv("XDG_DATA_HOME") + if baseDir == "" { + homeDir := os.Getenv("HOME") + if homeDir == "" { + return "", errors.New("neither $XDG_DATA_HOME nor $HOME are defined") + } + baseDir = filepath.Join(homeDir, ".local/share") + } + } + + // append our program-specific dir to it (OSX has a slightly different + // convention so try to follow that). + switch runtime.GOOS { + case "darwin", "ios": + return filepath.Join(baseDir, "io.kubebuilder.envtest", "k8s"), nil + default: + return filepath.Join(baseDir, "kubebuilder-envtest", "k8s"), nil + } +} + +// index represents an index of envtest binary archives. 
Example: +// +// releases: +// v1.28.0: +// envtest-v1.28.0-darwin-amd64.tar.gz: +// hash: +// selfLink: +type index struct { + // Releases maps Kubernetes versions to Releases (envtest archives). + Releases map[string]release `json:"releases"` +} + +// release maps an archive name to an archive. +type release map[string]archive + +// archive contains the self link to an archive and its hash. +type archive struct { + Hash string `json:"hash"` + SelfLink string `json:"selfLink"` +} + +// parseKubernetesVersion returns: +// 1. the SemVer form of s when it refers to a specific Kubernetes release, or +// 2. the major and minor portions of s when it refers to a release series, or +// 3. an error +func parseKubernetesVersion(s string) (exact string, major, minor uint, err error) { + if v, err := version.ParseSemantic(s); err == nil { + return v.String(), 0, 0, nil + } + + // See two parseable components and nothing else. + if v, err := version.ParseGeneric(s); err == nil && len(v.Components()) == 2 { + if v.String() == strings.TrimPrefix(s, "v") { + return "", v.Major(), v.Minor(), nil + } + } + + return "", 0, 0, fmt.Errorf("could not parse %q as version", s) +} + +func downloadBinaryAssets(ctx context.Context, binaryAssetsDirectory, binaryAssetsVersion, binaryAssetsIndexURL string) (string, string, string, error) { + if binaryAssetsIndexURL == "" { + binaryAssetsIndexURL = DefaultBinaryAssetsIndexURL + } + + downloadRootDir := binaryAssetsDirectory + if downloadRootDir == "" { + var err error + if downloadRootDir, err = os.MkdirTemp("", "envtest-binaries-"); err != nil { + return "", "", "", fmt.Errorf("failed to create tmp directory for envtest binaries: %w", err) + } + } + + var binaryAssetsIndex *index + switch exact, major, minor, err := parseKubernetesVersion(binaryAssetsVersion); { + case binaryAssetsVersion != "" && err != nil: + return "", "", "", err + + case binaryAssetsVersion != "" && exact != "": + // Look for these specific binaries locally before 
downloading them from the release index. + // Use the canonical form of the version from here on. + binaryAssetsVersion = "v" + exact + + case binaryAssetsVersion == "" || major != 0 || minor != 0: + // Select a stable version from the release index before continuing. + binaryAssetsIndex, err = getIndex(ctx, binaryAssetsIndexURL) + if err != nil { + return "", "", "", err + } + + binaryAssetsVersion, err = latestStableVersionFromIndex(binaryAssetsIndex, major, minor) + if err != nil { + return "", "", "", err + } + } + + // Storing the envtest binaries in a directory structure that is compatible with setup-envtest. + // This makes it possible to share the envtest binaries with setup-envtest if the BinaryAssetsDirectory is set to SetupEnvtestDefaultBinaryAssetsDirectory(). + downloadDir := path.Join(downloadRootDir, fmt.Sprintf("%s-%s-%s", strings.TrimPrefix(binaryAssetsVersion, "v"), runtime.GOOS, runtime.GOARCH)) + if !fileExists(downloadDir) { + if err := os.MkdirAll(downloadDir, 0700); err != nil { + return "", "", "", fmt.Errorf("failed to create directory %q for envtest binaries: %w", downloadDir, err) + } + } + + apiServerPath := path.Join(downloadDir, "kube-apiserver") + etcdPath := path.Join(downloadDir, "etcd") + kubectlPath := path.Join(downloadDir, "kubectl") + + if fileExists(apiServerPath) && fileExists(etcdPath) && fileExists(kubectlPath) { + // Nothing to do if the binaries already exist. + return apiServerPath, etcdPath, kubectlPath, nil + } + + // Get Index if we didn't have to get it above to get the latest stable version. 
+ if binaryAssetsIndex == nil { + var err error + binaryAssetsIndex, err = getIndex(ctx, binaryAssetsIndexURL) + if err != nil { + return "", "", "", err + } + } + + buf := &bytes.Buffer{} + if err := downloadBinaryAssetsArchive(ctx, binaryAssetsIndex, binaryAssetsVersion, buf); err != nil { + return "", "", "", err + } + + gzStream, err := gzip.NewReader(buf) + if err != nil { + return "", "", "", fmt.Errorf("failed to create gzip reader to extract envtest binaries: %w", err) + } + tarReader := tar.NewReader(gzStream) + + var header *tar.Header + for header, err = tarReader.Next(); err == nil; header, err = tarReader.Next() { + if header.Typeflag != tar.TypeReg { + // Skip non-regular file entry in archive. + continue + } + + // Just dump all files directly into the download directory, ignoring the prefixed directory paths. + // We also ignore bits for the most part (except for X). + fileName := filepath.Base(header.Name) + perms := 0555 & header.Mode // make sure we're at most r+x + + // Setting O_EXCL to get an error if the file already exists. + f, err := os.OpenFile(path.Join(downloadDir, fileName), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_TRUNC, os.FileMode(perms)) + if err != nil { + if os.IsExist(err) { + // Nothing to do if the file already exists. We assume another process created the file concurrently. 
+ continue + } + return "", "", "", fmt.Errorf("failed to create file %s in directory %s: %w", fileName, downloadDir, err) + } + if err := func() error { + defer f.Close() + if _, err := io.Copy(f, tarReader); err != nil { + return fmt.Errorf("failed to write file %s in directory %s: %w", fileName, downloadDir, err) + } + return nil + }(); err != nil { + return "", "", "", fmt.Errorf("failed to close file %s in directory %s: %w", fileName, downloadDir, err) + } + } + + return apiServerPath, etcdPath, kubectlPath, nil +} + +func fileExists(path string) bool { + if _, err := os.Stat(path); err == nil { + return true + } + return false +} + +func downloadBinaryAssetsArchive(ctx context.Context, index *index, version string, out io.Writer) error { + archives, ok := index.Releases[version] + if !ok { + return fmt.Errorf("failed to find envtest binaries for version %s", version) + } + + archiveName := fmt.Sprintf("envtest-%s-%s-%s.tar.gz", version, runtime.GOOS, runtime.GOARCH) + archive, ok := archives[archiveName] + if !ok { + return fmt.Errorf("failed to find envtest binaries for version %s with archiveName %s", version, archiveName) + } + + archiveURL, err := url.Parse(archive.SelfLink) + if err != nil { + return fmt.Errorf("failed to parse envtest binaries archive URL %q: %w", archiveURL, err) + } + + req, err := http.NewRequestWithContext(ctx, "GET", archiveURL.String(), nil) + if err != nil { + return fmt.Errorf("failed to create request to download %s: %w", archiveURL.String(), err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("failed to download %s: %w", archiveURL.String(), err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("failed to download %s, got status %q", archiveURL.String(), resp.Status) + } + + return readBody(resp, out, archiveName, archive.Hash) +} + +// latestStableVersionFromIndex returns the version with highest [precedence] in index that is not a prerelease. 
+// When either major or minor are not zero, the returned version will have those major and minor versions. +// Note that the version cannot be limited to 0.0.x this way. +// +// It is an error when there is no appropriate version in index. +// +// [precedence]: https://semver.org/spec/v2.0.0.html#spec-item-11 +func latestStableVersionFromIndex(index *index, major, minor uint) (string, error) { + if len(index.Releases) == 0 { + return "", fmt.Errorf("failed to find latest stable version from index: index is empty") + } + + var found *version.Version + for releaseVersion := range index.Releases { + v, err := version.ParseSemantic(releaseVersion) + if err != nil { + return "", fmt.Errorf("failed to parse version %q: %w", releaseVersion, err) + } + + // Filter out pre-releases. + if len(v.PreRelease()) > 0 { + continue + } + + // Filter on release series, if any. + if (major != 0 || minor != 0) && (v.Major() != major || v.Minor() != minor) { + continue + } + + if found == nil || v.GreaterThan(found) { + found = v + } + } + + if found == nil { + search := "any" + if major != 0 || minor != 0 { + search = fmt.Sprint(major, ".", minor) + } + + return "", fmt.Errorf("failed to find latest stable version from index: index does not have %s stable versions", search) + } + + return "v" + found.String(), nil +} + +func getIndex(ctx context.Context, indexURL string) (*index, error) { + loc, err := url.Parse(indexURL) + if err != nil { + return nil, fmt.Errorf("unable to parse index URL: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return nil, fmt.Errorf("unable to construct request to get index: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("unable to perform request to get index: %w", err) + } + + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, fmt.Errorf("unable to get index -- got status %q", resp.Status) + } + + responseBody, err := 
io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to get index -- unable to read body %w", err) + } + + var index index + if err := yaml.Unmarshal(responseBody, &index); err != nil { + return nil, fmt.Errorf("unable to unmarshal index: %w", err) + } + return &index, nil +} + +func readBody(resp *http.Response, out io.Writer, archiveName string, expectedHash string) error { + // Stream in chunks to do the checksum + buf := make([]byte, 32*1024) // 32KiB, same as io.Copy + hasher := sha512.New() + + for cont := true; cont; { + amt, err := resp.Body.Read(buf) + if err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("unable read next chunk of %s: %w", archiveName, err) + } + if amt > 0 { + // checksum never returns errors according to docs + hasher.Write(buf[:amt]) + if _, err := out.Write(buf[:amt]); err != nil { + return fmt.Errorf("unable write next chunk of %s: %w", archiveName, err) + } + } + cont = amt > 0 && !errors.Is(err, io.EOF) + } + + actualHash := hex.EncodeToString(hasher.Sum(nil)) + if actualHash != expectedHash { + return fmt.Errorf("checksum mismatch for %s: %s (computed) != %s (expected)", archiveName, actualHash, expectedHash) + } + + return nil +} diff --git a/pkg/envtest/binaries_test.go b/pkg/envtest/binaries_test.go new file mode 100644 index 0000000000..aa83963381 --- /dev/null +++ b/pkg/envtest/binaries_test.go @@ -0,0 +1,357 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package envtest + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/rand" + "crypto/sha512" + "encoding/hex" + "fmt" + "net/http" + "os" + "path" + "runtime" + "strings" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/ghttp" + "sigs.k8s.io/yaml" +) + +func TestParseKubernetesVersion(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + inputs []string + + expectError string + expectExact bool + expectSeriesMajor uint + expectSeriesMinor uint + }{ + { + name: `SemVer and "v" prefix are exact`, + inputs: []string{ + "1.2.3", "v1.2.3", "v1.30.2", "v1.31.0-beta.0", "v1.33.0-alpha.2", + }, + expectExact: true, + }, + { + name: "empty string is not a version", + inputs: []string{""}, + expectError: "could not parse", + }, + { + name: "leading zeroes are not a version", + inputs: []string{ + "01.2.0", "00001.2.3", "1.2.03", "v01.02.0003", + }, + expectError: "could not parse", + }, + { + name: "weird stuff is not a version", + inputs: []string{ + "asdf", "version", "vegeta4", "the.1", "2ne1", "=7.8.9", "10.x", "*", + "0.0001", "1.00002", "v1.2anything", "1.2.x", "1.2.z", "1.2.*", + }, + expectError: "could not parse", + }, + { + name: "one number is not a version", + inputs: []string{ + "1", "v1", "v001", "1.", "v1.", "1.x", + }, + expectError: "could not parse", + }, + { + name: "two numbers are a release series", + inputs: []string{"0.1", "v0.1"}, + + expectSeriesMajor: 0, + expectSeriesMinor: 1, + }, + { + name: "two numbers are a release series", + inputs: []string{"1.2", "v1.2"}, + + expectSeriesMajor: 1, + expectSeriesMinor: 2, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + for _, input := range tc.inputs { + exact, major, minor, err := parseKubernetesVersion(input) + + if tc.expectError != "" && err == nil { + t.Errorf("expected error %q, got none", tc.expectError) + } + if tc.expectError != "" 
&& !strings.Contains(err.Error(), tc.expectError) { + t.Errorf("expected error %q, got %q", tc.expectError, err) + } + if tc.expectError == "" && err != nil { + t.Errorf("expected no error, got %q", err) + continue + } + + if tc.expectExact { + if expected := strings.TrimPrefix(input, "v"); exact != expected { + t.Errorf("expected canonical %q for %q, got %q", expected, input, exact) + } + if major != 0 || minor != 0 { + t.Errorf("expected no release series for %q, got (%v, %v)", input, major, minor) + } + continue + } + + if major != tc.expectSeriesMajor { + t.Errorf("expected major %v for %q, got %v", tc.expectSeriesMajor, input, major) + } + if minor != tc.expectSeriesMinor { + t.Errorf("expected minor %v for %q, got %v", tc.expectSeriesMinor, input, minor) + } + if exact != "" { + t.Errorf("expected no canonical version for %q, got %q", input, exact) + } + } + }) + } +} + +var _ = Describe("Test download binaries", func() { + var downloadDirectory string + var server *ghttp.Server + + BeforeEach(func() { + downloadDirectory = GinkgoT().TempDir() + + server = ghttp.NewServer() + DeferCleanup(func() { + server.Close() + }) + setupServer(server) + }) + + It("should download binaries of latest stable version", func(ctx SpecContext) { + apiServerPath, etcdPath, kubectlPath, err := downloadBinaryAssets(ctx, downloadDirectory, "", fmt.Sprintf("http://%s/%s", server.Addr(), "envtest-releases.yaml")) + Expect(err).ToNot(HaveOccurred()) + + // Verify latest stable version (v1.32.0) was downloaded + versionDownloadDirectory := path.Join(downloadDirectory, fmt.Sprintf("1.32.0-%s-%s", runtime.GOOS, runtime.GOARCH)) + Expect(apiServerPath).To(Equal(path.Join(versionDownloadDirectory, "kube-apiserver"))) + Expect(etcdPath).To(Equal(path.Join(versionDownloadDirectory, "etcd"))) + Expect(kubectlPath).To(Equal(path.Join(versionDownloadDirectory, "kubectl"))) + + dirEntries, err := os.ReadDir(versionDownloadDirectory) + Expect(err).ToNot(HaveOccurred()) + var actualFiles []string 
+ for _, e := range dirEntries { + actualFiles = append(actualFiles, e.Name()) + } + Expect(actualFiles).To(ConsistOf("some-file")) + }) + + It("should download binaries of an exact version", func(ctx SpecContext) { + apiServerPath, etcdPath, kubectlPath, err := downloadBinaryAssets(ctx, downloadDirectory, "v1.31.0", fmt.Sprintf("http://%s/%s", server.Addr(), "envtest-releases.yaml")) + Expect(err).ToNot(HaveOccurred()) + + // Verify exact version (v1.31.0) was downloaded + versionDownloadDirectory := path.Join(downloadDirectory, fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)) + Expect(apiServerPath).To(Equal(path.Join(versionDownloadDirectory, "kube-apiserver"))) + Expect(etcdPath).To(Equal(path.Join(versionDownloadDirectory, "etcd"))) + Expect(kubectlPath).To(Equal(path.Join(versionDownloadDirectory, "kubectl"))) + + dirEntries, err := os.ReadDir(versionDownloadDirectory) + Expect(err).ToNot(HaveOccurred()) + var actualFiles []string + for _, e := range dirEntries { + actualFiles = append(actualFiles, e.Name()) + } + Expect(actualFiles).To(ConsistOf("some-file")) + }) + + It("should download binaries of latest stable version of a release series", func(ctx SpecContext) { + apiServerPath, etcdPath, kubectlPath, err := downloadBinaryAssets(ctx, downloadDirectory, "1.31", fmt.Sprintf("http://%s/%s", server.Addr(), "envtest-releases.yaml")) + Expect(err).ToNot(HaveOccurred()) + + // Verify stable version (v1.31.4) was downloaded + versionDownloadDirectory := path.Join(downloadDirectory, fmt.Sprintf("1.31.4-%s-%s", runtime.GOOS, runtime.GOARCH)) + Expect(apiServerPath).To(Equal(path.Join(versionDownloadDirectory, "kube-apiserver"))) + Expect(etcdPath).To(Equal(path.Join(versionDownloadDirectory, "etcd"))) + Expect(kubectlPath).To(Equal(path.Join(versionDownloadDirectory, "kubectl"))) + + dirEntries, err := os.ReadDir(versionDownloadDirectory) + Expect(err).ToNot(HaveOccurred()) + var actualFiles []string + for _, e := range dirEntries { + actualFiles = 
append(actualFiles, e.Name()) + } + Expect(actualFiles).To(ConsistOf("some-file")) + }) + + It("should error when the asset version is not a version", func(ctx SpecContext) { + _, _, _, err := downloadBinaryAssets(ctx, downloadDirectory, "wonky", fmt.Sprintf("http://%s/%s", server.Addr(), "envtest-releases.yaml")) + Expect(err).To(MatchError(`could not parse "wonky" as version`)) + }) + + It("should error when the asset version is not in the index", func(ctx SpecContext) { + _, _, _, err := downloadBinaryAssets(ctx, downloadDirectory, "v1.5.0", fmt.Sprintf("http://%s/%s", server.Addr(), "envtest-releases.yaml")) + Expect(err).To(MatchError("failed to find envtest binaries for version v1.5.0")) + + _, _, _, err = downloadBinaryAssets(ctx, downloadDirectory, "v1.5", fmt.Sprintf("http://%s/%s", server.Addr(), "envtest-releases.yaml")) + Expect(err).To(MatchError("failed to find latest stable version from index: index does not have 1.5 stable versions")) + }) +}) + +var ( + envtestBinaryArchives = index{ + Releases: map[string]release{ + "v1.32.0": map[string]archive{ + "envtest-v1.32.0-darwin-amd64.tar.gz": {}, + "envtest-v1.32.0-darwin-arm64.tar.gz": {}, + "envtest-v1.32.0-linux-amd64.tar.gz": {}, + "envtest-v1.32.0-linux-arm64.tar.gz": {}, + "envtest-v1.32.0-linux-ppc64le.tar.gz": {}, + "envtest-v1.32.0-linux-s390x.tar.gz": {}, + "envtest-v1.32.0-windows-amd64.tar.gz": {}, + }, + "v1.31.4": map[string]archive{ + "envtest-v1.31.4-darwin-amd64.tar.gz": {}, + "envtest-v1.31.4-darwin-arm64.tar.gz": {}, + "envtest-v1.31.4-linux-amd64.tar.gz": {}, + "envtest-v1.31.4-linux-arm64.tar.gz": {}, + "envtest-v1.31.4-linux-ppc64le.tar.gz": {}, + "envtest-v1.31.4-linux-s390x.tar.gz": {}, + "envtest-v1.31.4-windows-amd64.tar.gz": {}, + }, + "v1.31.0": map[string]archive{ + "envtest-v1.31.0-darwin-amd64.tar.gz": {}, + "envtest-v1.31.0-darwin-arm64.tar.gz": {}, + "envtest-v1.31.0-linux-amd64.tar.gz": {}, + "envtest-v1.31.0-linux-arm64.tar.gz": {}, + 
"envtest-v1.31.0-linux-ppc64le.tar.gz": {}, + "envtest-v1.31.0-linux-s390x.tar.gz": {}, + "envtest-v1.31.0-windows-amd64.tar.gz": {}, + }, + }, + } +) + +func setupServer(server *ghttp.Server) { + itemsHTTP := makeArchives(envtestBinaryArchives) + + // The index from itemsHTTP contains only relative SelfLinks. + // finalIndex will contain the full links based on server.Addr(). + finalIndex := index{ + Releases: map[string]release{}, + } + + for releaseVersion, releases := range itemsHTTP.index.Releases { + finalIndex.Releases[releaseVersion] = release{} + + for archiveName, a := range releases { + finalIndex.Releases[releaseVersion][archiveName] = archive{ + Hash: a.Hash, + SelfLink: fmt.Sprintf("http://%s/%s", server.Addr(), a.SelfLink), + } + content := itemsHTTP.contents[archiveName] + + // Note: Using the relative path from archive here instead of the full path. + server.RouteToHandler("GET", "/"+a.SelfLink, func(resp http.ResponseWriter, req *http.Request) { + resp.WriteHeader(http.StatusOK) + Expect(resp.Write(content)).To(Equal(len(content))) + }) + } + } + + indexYAML, err := yaml.Marshal(finalIndex) + Expect(err).ToNot(HaveOccurred()) + + server.RouteToHandler("GET", "/envtest-releases.yaml", ghttp.RespondWith( + http.StatusOK, + indexYAML, + )) +} + +type itemsHTTP struct { + index index + contents map[string][]byte +} + +func makeArchives(i index) itemsHTTP { + // This creates a new copy of the index so modifying the index + // in some tests doesn't affect others. 
+ res := itemsHTTP{ + index: index{ + Releases: map[string]release{}, + }, + contents: map[string][]byte{}, + } + + for releaseVersion, releases := range i.Releases { + res.index.Releases[releaseVersion] = release{} + for archiveName := range releases { + var chunk [1024 * 48]byte // 1.5 times our chunk read size in GetVersion + copy(chunk[:], archiveName) + if _, err := rand.Read(chunk[len(archiveName):]); err != nil { + panic(err) + } + content, hash := makeArchive(chunk[:]) + + res.index.Releases[releaseVersion][archiveName] = archive{ + Hash: hash, + // Note: Only storing the name of the archive for now. + // This will be expanded later to a full URL once the server is running. + SelfLink: archiveName, + } + res.contents[archiveName] = content + } + } + return res +} + +func makeArchive(contents []byte) ([]byte, string) { + out := new(bytes.Buffer) + gzipWriter := gzip.NewWriter(out) + tarWriter := tar.NewWriter(gzipWriter) + err := tarWriter.WriteHeader(&tar.Header{ + Name: "controller-tools/envtest/some-file", + Size: int64(len(contents)), + Mode: 0777, // so we can check that we fix this later + }) + if err != nil { + panic(err) + } + _, err = tarWriter.Write(contents) + if err != nil { + panic(err) + } + tarWriter.Close() + gzipWriter.Close() + content := out.Bytes() + // controller-tools is using sha512 + hash := sha512.Sum512(content) + hashEncoded := hex.EncodeToString(hash[:]) + return content, hashEncoded +} diff --git a/pkg/envtest/crd.go b/pkg/envtest/crd.go index 375994ab5a..8ed2224cfe 100644 --- a/pkg/envtest/crd.go +++ b/pkg/envtest/crd.go @@ -19,111 +19,145 @@ package envtest import ( "bufio" "bytes" + "context" + "errors" + "fmt" "io" - "io/ioutil" "os" "path/filepath" "time" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apierrors 
"k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" k8syaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "k8s.io/client-go/util/retry" + "k8s.io/utils/ptr" "sigs.k8s.io/yaml" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" ) -// CRDInstallOptions are the options for installing CRDs +// CRDInstallOptions are the options for installing CRDs. type CRDInstallOptions struct { - // Paths is the path to the directory containing CRDs + // Scheme is used to determine if conversion webhooks should be enabled + // for a particular CRD / object. + // + // Conversion webhooks are going to be enabled if an object in the scheme + // implements Hub and Spoke conversions. + // + // If nil, scheme.Scheme is used. + Scheme *runtime.Scheme + + // Paths is a list of paths to the directories or files containing CRDs Paths []string // CRDs is a list of CRDs to install - CRDs []*apiextensionsv1beta1.CustomResourceDefinition + CRDs []*apiextensionsv1.CustomResourceDefinition // ErrorIfPathMissing will cause an error if a Path does not exist ErrorIfPathMissing bool - // maxTime is the max time to wait - maxTime time.Duration + // MaxTime is the max time to wait + MaxTime time.Duration + + // PollInterval is the interval to check + PollInterval time.Duration + + // CleanUpAfterUse will cause the CRDs listed for installation to be + // uninstalled when terminating the test environment. + // Defaults to false. + CleanUpAfterUse bool - // pollInterval is the interval to check - pollInterval time.Duration + // WebhookOptions contains the conversion webhook information to install + // on the CRDs. This field is usually inherited by the EnvTest options. 
+ // + // If you're passing this field manually, you need to make sure that + // the CA information and host port is filled in properly. + WebhookOptions WebhookInstallOptions } -const defaultPollInterval = 100 * time.Millisecond -const defaultMaxWait = 10 * time.Second +const ( + defaultPollInterval = 100 * time.Millisecond + defaultMaxWait = 10 * time.Second +) -// InstallCRDs installs a collection of CRDs into a cluster by reading the crd yaml files from a directory -func InstallCRDs(config *rest.Config, options CRDInstallOptions) ([]*apiextensionsv1beta1.CustomResourceDefinition, error) { +// InstallCRDs installs a collection of CRDs into a cluster by reading the crd yaml files from a directory. +func InstallCRDs(config *rest.Config, options CRDInstallOptions) ([]*apiextensionsv1.CustomResourceDefinition, error) { defaultCRDOptions(&options) // Read the CRD yamls into options.CRDs - if err := readCRDFiles(&options); err != nil { + if err := ReadCRDFiles(&options); err != nil { + return nil, fmt.Errorf("unable to read CRD files: %w", err) + } + + if err := modifyConversionWebhooks(options.CRDs, options.Scheme, options.WebhookOptions); err != nil { return nil, err } // Create the CRDs in the apiserver if err := CreateCRDs(config, options.CRDs); err != nil { - return options.CRDs, err + return options.CRDs, fmt.Errorf("unable to create CRD instances: %w", err) } // Wait for the CRDs to appear as Resources in the apiserver if err := WaitForCRDs(config, options.CRDs, options); err != nil { - return options.CRDs, err + return options.CRDs, fmt.Errorf("something went wrong waiting for CRDs to appear as API resources: %w", err) } return options.CRDs, nil } -// readCRDFiles reads the directories of CRDs in options.Paths and adds the CRD structs to options.CRDs -func readCRDFiles(options *CRDInstallOptions) error { +// ReadCRDFiles reads the directories of CRDs in options.Paths and adds the CRD structs to options.CRDs. 
+func ReadCRDFiles(options *CRDInstallOptions) error { if len(options.Paths) > 0 { - for _, path := range options.Paths { - if _, err := os.Stat(path); !options.ErrorIfPathMissing && os.IsNotExist(err) { - continue - } - new, err := readCRDs(path) - if err != nil { - return err - } - options.CRDs = append(options.CRDs, new...) + crdList, err := renderCRDs(options) + if err != nil { + return err } + + options.CRDs = append(options.CRDs, crdList...) } return nil } -// defaultCRDOptions sets the default values for CRDs +// defaultCRDOptions sets the default values for CRDs. func defaultCRDOptions(o *CRDInstallOptions) { - if o.maxTime == 0 { - o.maxTime = defaultMaxWait + if o.Scheme == nil { + o.Scheme = scheme.Scheme } - if o.pollInterval == 0 { - o.pollInterval = defaultPollInterval + if o.MaxTime == 0 { + o.MaxTime = defaultMaxWait + } + if o.PollInterval == 0 { + o.PollInterval = defaultPollInterval } } -// WaitForCRDs waits for the CRDs to appear in discovery -func WaitForCRDs(config *rest.Config, crds []*apiextensionsv1beta1.CustomResourceDefinition, options CRDInstallOptions) error { +// WaitForCRDs waits for the CRDs to appear in discovery. 
+func WaitForCRDs(config *rest.Config, crds []*apiextensionsv1.CustomResourceDefinition, options CRDInstallOptions) error { // Add each CRD to a map of GroupVersion to Resource - waitingFor := map[schema.GroupVersion]*sets.String{} + waitingFor := map[schema.GroupVersion]*sets.Set[string]{} for _, crd := range crds { gvs := []schema.GroupVersion{} - if crd.Spec.Version != "" { - gvs = append(gvs, schema.GroupVersion{Group: crd.Spec.Group, Version: crd.Spec.Version}) - } - for _, ver := range crd.Spec.Versions { - if ver.Served { - gvs = append(gvs, schema.GroupVersion{Group: crd.Spec.Group, Version: ver.Name}) + for _, version := range crd.Spec.Versions { + if version.Served { + gvs = append(gvs, schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name}) } } + for _, gv := range gvs { log.V(1).Info("adding API in waitlist", "GV", gv) if _, found := waitingFor[gv]; !found { // Initialize the set - waitingFor[gv] = &sets.String{} + waitingFor[gv] = &sets.Set[string]{} } // Add the Resource waitingFor[gv].Insert(crd.Spec.Names.Plural) @@ -132,20 +166,20 @@ func WaitForCRDs(config *rest.Config, crds []*apiextensionsv1beta1.CustomResourc // Poll until all resources are found in discovery p := &poller{config: config, waitingFor: waitingFor} - return wait.PollImmediate(options.pollInterval, options.maxTime, p.poll) + return wait.PollUntilContextTimeout(context.TODO(), options.PollInterval, options.MaxTime, true, p.poll) } -// poller checks if all the resources have been found in discovery, and returns false if not +// poller checks if all the resources have been found in discovery, and returns false if not. 
type poller struct { // config is used to get discovery config *rest.Config // waitingFor is the map of resources keyed by group version that have not yet been found in discovery - waitingFor map[schema.GroupVersion]*sets.String + waitingFor map[schema.GroupVersion]*sets.Set[string] } -// poll checks if all the resources have been found in discovery, and returns false if not -func (p *poller) poll() (done bool, err error) { +// poll checks if all the resources have been found in discovery, and returns false if not. +func (p *poller) poll(ctx context.Context) (done bool, err error) { // Create a new clientset to avoid any client caching of discovery cs, err := clientset.NewForConfig(p.config) if err != nil { @@ -164,7 +198,7 @@ func (p *poller) poll() (done bool, err error) { // TODO: Maybe the controller-runtime client should be able to do this... resourceList, err := cs.Discovery().ServerResourcesForGroupVersion(gv.Group + "/" + gv.Version) if err != nil { - return false, nil + return false, nil //nolint:nilerr } // Remove each found resource from the resources set that we are waiting for @@ -180,70 +214,231 @@ func (p *poller) poll() (done bool, err error) { return allFound, nil } -// CreateCRDs creates the CRDs -func CreateCRDs(config *rest.Config, crds []*apiextensionsv1beta1.CustomResourceDefinition) error { - cs, err := clientset.NewForConfig(config) +// UninstallCRDs uninstalls a collection of CRDs by reading the crd yaml files from a directory. 
+func UninstallCRDs(config *rest.Config, options CRDInstallOptions) error { + // Read the CRD yamls into options.CRDs + if err := ReadCRDFiles(&options); err != nil { + return err + } + + // Delete the CRDs from the apiserver + cs, err := client.New(config, client.Options{}) if err != nil { return err } + // Uninstall each CRD + for _, crd := range options.CRDs { + log.V(1).Info("uninstalling CRD", "crd", crd.GetName()) + if err := cs.Delete(context.TODO(), crd); err != nil { + // If CRD is not found, we can consider success + if !apierrors.IsNotFound(err) { + return err + } + } + } + + return nil +} + +// CreateCRDs creates the CRDs. +func CreateCRDs(config *rest.Config, crds []*apiextensionsv1.CustomResourceDefinition) error { + cs, err := client.New(config, client.Options{}) + if err != nil { + return fmt.Errorf("unable to create client: %w", err) + } + // Create each CRD for _, crd := range crds { - log.V(1).Info("installing CRD", "crd", crd) - if _, err := cs.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil { - return err + log.V(1).Info("installing CRD", "crd", crd.GetName()) + existingCrd := crd.DeepCopy() + err := cs.Get(context.TODO(), client.ObjectKey{Name: crd.GetName()}, existingCrd) + switch { + case apierrors.IsNotFound(err): + if err := cs.Create(context.TODO(), crd); err != nil { + return fmt.Errorf("unable to create CRD %q: %w", crd.GetName(), err) + } + case err != nil: + return fmt.Errorf("unable to get CRD %q to check if it exists: %w", crd.GetName(), err) + default: + log.V(1).Info("CRD already exists, updating", "crd", crd.GetName()) + if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := cs.Get(context.TODO(), client.ObjectKey{Name: crd.GetName()}, existingCrd); err != nil { + return err + } + crd.SetResourceVersion(existingCrd.GetResourceVersion()) + return cs.Update(context.TODO(), crd) + }); err != nil { + return err + } } } return nil } -// readCRDs reads the CRDs from files and 
Unmarshals them into structs -func readCRDs(path string) ([]*apiextensionsv1beta1.CustomResourceDefinition, error) { - // Get the CRD files - var files []os.FileInfo - var err error - log.V(1).Info("reading CRDs from path", "path", path) - if files, err = ioutil.ReadDir(path); err != nil { - return nil, err +// renderCRDs iterate through options.Paths and extract all CRD files. +func renderCRDs(options *CRDInstallOptions) ([]*apiextensionsv1.CustomResourceDefinition, error) { + type GVKN struct { + GVK schema.GroupVersionKind + Name string } + crds := map[GVKN]*apiextensionsv1.CustomResourceDefinition{} + + for _, path := range options.Paths { + var ( + err error + info os.FileInfo + files []string + filePath = path + ) + + // Return the error if ErrorIfPathMissing exists + if info, err = os.Stat(path); os.IsNotExist(err) { + if options.ErrorIfPathMissing { + return nil, err + } + continue + } + + if !info.IsDir() { + filePath, files = filepath.Dir(path), []string{info.Name()} + } else { + entries, err := os.ReadDir(path) + if err != nil { + return nil, err + } + for _, e := range entries { + files = append(files, e.Name()) + } + } + + log.V(1).Info("reading CRDs from path", "path", path) + crdList, err := readCRDs(filePath, files) + if err != nil { + return nil, err + } + + for i, crd := range crdList { + gvkn := GVKN{GVK: crd.GroupVersionKind(), Name: crd.GetName()} + if _, found := crds[gvkn]; found { + // Currently, we only print a log when there are duplicates. We may want to error out if that makes more sense. + log.Info("there are more than one CRD definitions with the same ", "GVKN", gvkn) + } + // We always use the CRD definition that we found last. 
+ crds[gvkn] = crdList[i] + } + } + + // Converting map to a list to return + res := []*apiextensionsv1.CustomResourceDefinition{} + for _, obj := range crds { + res = append(res, obj) + } + return res, nil +} + +// modifyConversionWebhooks takes all the registered CustomResourceDefinitions and applies modifications +// to conditionally enable webhooks if the type is registered within the scheme. +func modifyConversionWebhooks(crds []*apiextensionsv1.CustomResourceDefinition, scheme *runtime.Scheme, webhookOptions WebhookInstallOptions) error { + if len(webhookOptions.LocalServingCAData) == 0 { + return nil + } + + // Determine all registered convertible types. + convertibles := map[schema.GroupKind]struct{}{} + for gvk := range scheme.AllKnownTypes() { + obj, err := scheme.New(gvk) + if err != nil { + return err + } + if ok, err := conversion.IsConvertible(scheme, obj); ok && err == nil { + convertibles[gvk.GroupKind()] = struct{}{} + } + } + + // generate host port. + hostPort, err := webhookOptions.generateHostPort() + if err != nil { + return err + } + url := ptr.To(fmt.Sprintf("https://%s/convert", hostPort)) + + for i := range crds { + // Continue if we're preserving unknown fields. + if crds[i].Spec.PreserveUnknownFields { + continue + } + if !webhookOptions.IgnoreSchemeConvertible { + // Continue if the GroupKind isn't registered as being convertible, + // and remove any existing conversion webhooks if they exist. + // This is to prevent the CRD from being rejected by the apiserver, usually + // manifests that are generated by controller-gen will have a conversion + // webhook set, but we don't want to enable it if the type isn't registered. 
+ if _, ok := convertibles[schema.GroupKind{ + Group: crds[i].Spec.Group, + Kind: crds[i].Spec.Names.Kind, + }]; !ok { + crds[i].Spec.Conversion = nil + continue + } + } + if crds[i].Spec.Conversion == nil { + crds[i].Spec.Conversion = &apiextensionsv1.CustomResourceConversion{ + Webhook: &apiextensionsv1.WebhookConversion{}, + } + } + crds[i].Spec.Conversion.Strategy = apiextensionsv1.WebhookConverter + crds[i].Spec.Conversion.Webhook.ConversionReviewVersions = []string{"v1", "v1beta1"} + crds[i].Spec.Conversion.Webhook.ClientConfig = &apiextensionsv1.WebhookClientConfig{ + Service: nil, + URL: url, + CABundle: webhookOptions.LocalServingCAData, + } + } + + return nil +} + +// readCRDs reads the CRDs from files and Unmarshals them into structs. +func readCRDs(basePath string, files []string) ([]*apiextensionsv1.CustomResourceDefinition, error) { + var crds []*apiextensionsv1.CustomResourceDefinition + // White list the file extensions that may contain CRDs crdExts := sets.NewString(".json", ".yaml", ".yml") - var crds []*apiextensionsv1beta1.CustomResourceDefinition for _, file := range files { - // Only parse whitelisted file types - if !crdExts.Has(filepath.Ext(file.Name())) { + // Only parse allowlisted file types + if !crdExts.Has(filepath.Ext(file)) { continue } // Unmarshal CRDs from file into structs - docs, err := readDocuments(filepath.Join(path, file.Name())) + docs, err := readDocuments(filepath.Join(basePath, file)) if err != nil { return nil, err } for _, doc := range docs { - crd := &apiextensionsv1beta1.CustomResourceDefinition{} + crd := &apiextensionsv1.CustomResourceDefinition{} if err = yaml.Unmarshal(doc, crd); err != nil { return nil, err } - // Check that it is actually a CRD - if crd.Spec.Names.Kind == "" || crd.Spec.Group == "" { + if crd.Kind != "CustomResourceDefinition" || crd.Spec.Names.Kind == "" || crd.Spec.Group == "" { continue } crds = append(crds, crd) } - log.V(1).Info("read CRDs from file", "file", file.Name()) + 
log.V(1).Info("read CRDs from file", "file", file) } return crds, nil } -// readDocuments reads documents from file +// readDocuments reads documents from file. func readDocuments(fp string) ([][]byte, error) { - b, err := ioutil.ReadFile(fp) + b, err := os.ReadFile(fp) if err != nil { return nil, err } @@ -254,7 +449,7 @@ func readDocuments(fp string) ([][]byte, error) { // Read document doc, err := reader.Read() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { break } diff --git a/pkg/envtest/crd_test.go b/pkg/envtest/crd_test.go new file mode 100644 index 0000000000..a1406615d6 --- /dev/null +++ b/pkg/envtest/crd_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/util/sets" +) + +var _ = Describe("Test", func() { + Describe("readCRDFiles", func() { + It("should not mix up files from different directories", func() { + opt := CRDInstallOptions{ + Paths: []string{ + "testdata/crds", + "testdata/crdv1_original", + }, + } + err := ReadCRDFiles(&opt) + Expect(err).NotTo(HaveOccurred()) + + expectedCRDs := sets.NewString( + "frigates.ship.example.com", + "configs.foo.example.com", + "drivers.crew.example.com", + ) + + foundCRDs := sets.NewString() + for _, crd := range opt.CRDs { + foundCRDs.Insert(crd.Name) + } + + Expect(expectedCRDs).To(Equal(foundCRDs)) + }) + }) +}) diff --git a/pkg/envtest/envtest_suite_test.go b/pkg/envtest/envtest_suite_test.go index 46aba52138..f7788bf090 100644 --- a/pkg/envtest/envtest_suite_test.go +++ b/pkg/envtest/envtest_suite_test.go @@ -19,30 +19,116 @@ package envtest import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + admissionv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) func TestSource(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "EnvTest Suite", []Reporter{NewlineReporter{}}) + RunSpecs(t, "Envtest Suite") } var env *Environment -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) env = &Environment{} + // we're initializing webhook here and not in webhook.go to also test the envtest install code via WebhookOptions + initializeWebhookInEnvironment() _, err := env.Start() Expect(err).NotTo(HaveOccurred()) +}) - close(done) -}, StartTimeout) +func initializeWebhookInEnvironment() { + namespacedScopeV1 := admissionv1.NamespacedScope + failedTypeV1 := admissionv1.Fail + equivalentTypeV1 := admissionv1.Equivalent + noSideEffectsV1 := admissionv1.SideEffectClassNone + webhookPathV1 := "/failing" -var _ = AfterSuite(func(done Done) { - Expect(env.Stop()).NotTo(HaveOccurred()) + env.WebhookInstallOptions = WebhookInstallOptions{ + ValidatingWebhooks: []*admissionv1.ValidatingWebhookConfiguration{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-validation-webhook-config", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "ValidatingWebhookConfiguration", + APIVersion: "admissionregistration.k8s.io/v1", + }, + Webhooks: []admissionv1.ValidatingWebhook{ + { + Name: "deployment-validation.kubebuilder.io", + Rules: []admissionv1.RuleWithOperations{ + { + Operations: []admissionv1.OperationType{"CREATE", "UPDATE"}, + Rule: admissionv1.Rule{ + APIGroups: []string{"apps"}, + APIVersions: []string{"v1"}, + Resources: []string{"deployments"}, + Scope: &namespacedScopeV1, + }, + }, + }, + FailurePolicy: &failedTypeV1, + MatchPolicy: 
&equivalentTypeV1, + SideEffects: &noSideEffectsV1, + ClientConfig: admissionv1.WebhookClientConfig{ + Service: &admissionv1.ServiceReference{ + Name: "deployment-validation-service", + Namespace: "default", + Path: &webhookPathV1, + }, + }, + AdmissionReviewVersions: []string{"v1"}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-validation-webhook-config", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "ValidatingWebhookConfiguration", + APIVersion: "admissionregistration.k8s.io/v1", + }, + Webhooks: []admissionv1.ValidatingWebhook{ + { + Name: "deployment-validation.kubebuilder.io", + Rules: []admissionv1.RuleWithOperations{ + { + Operations: []admissionv1.OperationType{"CREATE", "UPDATE"}, + Rule: admissionv1.Rule{ + APIGroups: []string{"apps"}, + APIVersions: []string{"v1"}, + Resources: []string{"deployments"}, + Scope: &namespacedScopeV1, + }, + }, + }, + FailurePolicy: &failedTypeV1, + MatchPolicy: &equivalentTypeV1, + SideEffects: &noSideEffectsV1, + ClientConfig: admissionv1.WebhookClientConfig{ + Service: &admissionv1.ServiceReference{ + Name: "deployment-validation-service", + Namespace: "default", + Path: &webhookPathV1, + }, + }, + AdmissionReviewVersions: []string{"v1"}, + }, + }, + }, + }, + } +} - close(done) -}, StopTimeout) +var _ = AfterSuite(func() { + Expect(env.Stop()).NotTo(HaveOccurred()) +}) diff --git a/pkg/envtest/envtest_test.go b/pkg/envtest/envtest_test.go index cf06ebc48a..ce3e9a4d3f 100644 --- a/pkg/envtest/envtest_test.go +++ b/pkg/envtest/envtest_test.go @@ -17,121 +17,214 @@ limitations under the License. package envtest import ( - "context" "path/filepath" - "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/clientcmd" "sigs.k8s.io/controller-runtime/pkg/client" ) var _ = Describe("Test", func() { - var crds []*v1beta1.CustomResourceDefinition + var crds []*apiextensionsv1.CustomResourceDefinition var err error var s *runtime.Scheme var c client.Client + var validDirectory = filepath.Join(".", "testdata") + var invalidDirectory = "fake" + + var teardownTimeoutSeconds float64 = 10 + // Initialize the client - BeforeEach(func(done Done) { - crds = []*v1beta1.CustomResourceDefinition{} - s = runtime.NewScheme() - err = v1beta1.AddToScheme(s) + BeforeEach(func() { + crds = []*apiextensionsv1.CustomResourceDefinition{} + s = scheme.Scheme + err = apiextensionsv1.AddToScheme(s) Expect(err).NotTo(HaveOccurred()) c, err = client.New(env.Config, client.Options{Scheme: s}) Expect(err).NotTo(HaveOccurred()) - - close(done) }) // Cleanup CRDs - AfterEach(func(done Done) { + AfterEach(func(ctx SpecContext) { for _, crd := range crds { - Expect(c.Delete(context.TODO(), crd)).To(Succeed()) + // Delete only if CRD exists. + crdObjectKey := client.ObjectKey{ + Name: crd.GetName(), + } + var placeholder apiextensionsv1.CustomResourceDefinition + if err = c.Get(ctx, crdObjectKey, &placeholder); err != nil && + apierrors.IsNotFound(err) { + // CRD doesn't need to be deleted. 
+ continue + } + Expect(err).NotTo(HaveOccurred()) + Expect(c.Delete(ctx, crd)).To(Succeed()) + Eventually(func() bool { + err := c.Get(ctx, crdObjectKey, &placeholder) + return apierrors.IsNotFound(err) + }, 5*time.Second).Should(BeTrue()) } - close(done) - }) + }, teardownTimeoutSeconds) Describe("InstallCRDs", func() { - It("should install the CRDs into the cluster", func(done Done) { + It("should install the unserved CRDs into the cluster", func(ctx SpecContext) { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{filepath.Join(".", "testdata", "crds", "examplecrd_unserved.yaml")}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to find the CRDs + + crd := &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "frigates.ship.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Frigate")) + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "ship.example.com", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "frigates", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: false, + }, + { + Name: "v1beta1", + Storage: false, + Served: false, + }, + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + }) + It("should install the CRDs into the cluster using directory", func(ctx SpecContext) { crds, err = InstallCRDs(env.Config, CRDInstallOptions{ - Paths: []string{filepath.Join(".", "testdata")}, + Paths: []string{validDirectory}, }) Expect(err).NotTo(HaveOccurred()) // Expect to find the CRDs - crd := &v1beta1.CustomResourceDefinition{} - err = c.Get(context.TODO(), types.NamespacedName{Name: "foos.bar.example.com"}, crd) + crd := &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, 
types.NamespacedName{Name: "foos.bar.example.com"}, crd) Expect(err).NotTo(HaveOccurred()) Expect(crd.Spec.Names.Kind).To(Equal("Foo")) - crd = &v1beta1.CustomResourceDefinition{} - err = c.Get(context.TODO(), types.NamespacedName{Name: "bazs.qux.example.com"}, crd) + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "bazs.qux.example.com"}, crd) Expect(err).NotTo(HaveOccurred()) Expect(crd.Spec.Names.Kind).To(Equal("Baz")) - crd = &v1beta1.CustomResourceDefinition{} - err = c.Get(context.TODO(), types.NamespacedName{Name: "captains.crew.example.com"}, crd) + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "captains.crew.example.com"}, crd) Expect(err).NotTo(HaveOccurred()) Expect(crd.Spec.Names.Kind).To(Equal("Captain")) - crd = &v1beta1.CustomResourceDefinition{} - err = c.Get(context.TODO(), types.NamespacedName{Name: "firstmates.crew.example.com"}, crd) + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "firstmates.crew.example.com"}, crd) Expect(err).NotTo(HaveOccurred()) Expect(crd.Spec.Names.Kind).To(Equal("FirstMate")) - crd = &v1beta1.CustomResourceDefinition{} - err = c.Get(context.TODO(), types.NamespacedName{Name: "drivers.crew.example.com"}, crd) + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "drivers.crew.example.com"}, crd) Expect(err).NotTo(HaveOccurred()) Expect(crd.Spec.Names.Kind).To(Equal("Driver")) - err = WaitForCRDs(env.Config, []*v1beta1.CustomResourceDefinition{ + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ { - Spec: v1beta1.CustomResourceDefinitionSpec{ - Group: "qux.example.com", - Version: "v1beta1", - Names: v1beta1.CustomResourceDefinitionNames{ - Plural: "bazs", + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "bar.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: 
"v1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "foos", }}, }, { - Spec: v1beta1.CustomResourceDefinitionSpec{ - Group: "bar.example.com", - Version: "v1beta1", - Names: v1beta1.CustomResourceDefinitionNames{ - Plural: "foos", + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "qux.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "bazs", }}, }, { - Spec: v1beta1.CustomResourceDefinitionSpec{ - Group: "crew.example.com", - Version: "v1beta1", - Names: v1beta1.CustomResourceDefinitionNames{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ Plural: "captains", }}, }, { - Spec: v1beta1.CustomResourceDefinitionSpec{ - Group: "crew.example.com", - Version: "v1beta1", - Names: v1beta1.CustomResourceDefinitionNames{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ Plural: "firstmates", }}, }, { - Spec: v1beta1.CustomResourceDefinitionSpec{ + 
Spec: apiextensionsv1.CustomResourceDefinitionSpec{ Group: "crew.example.com", - Names: v1beta1.CustomResourceDefinitionNames{ + Names: apiextensionsv1.CustomResourceDefinitionNames{ Plural: "drivers", }, - Versions: []v1beta1.CustomResourceDefinitionVersion{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ { Name: "v1", Storage: true, @@ -145,75 +238,729 @@ var _ = Describe("Test", func() { }}, }, }, - CRDInstallOptions{maxTime: 50 * time.Millisecond, pollInterval: 15 * time.Millisecond}, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, ) Expect(err).NotTo(HaveOccurred()) + }) + + It("should install the CRDs into the cluster using file", func(ctx SpecContext) { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{filepath.Join(".", "testdata", "crds", "examplecrd3.yaml")}, + }) + Expect(err).NotTo(HaveOccurred()) + + crd := &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "configs.foo.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Config")) + + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "foo.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "configs", + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should be able to install CRDs using multiple files", func() { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{ + filepath.Join(".", "testdata", "examplecrd.yaml"), + filepath.Join(".", "testdata", 
"examplecrd_v1.yaml"), + }, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(crds).To(HaveLen(2)) + }) - close(done) - }, 5) + It("should filter out already existent CRD", func(ctx SpecContext) { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{ + filepath.Join(".", "testdata"), + filepath.Join(".", "testdata", "examplecrd1.yaml"), + }, + }) + Expect(err).NotTo(HaveOccurred()) - It("should not return an not error if the directory doesn't exist", func(done Done) { - crds, err = InstallCRDs(env.Config, CRDInstallOptions{Paths: []string{"fake"}}) + crd := &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "foos.bar.example.com"}, crd) Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Foo")) - close(done) - }, 5) + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "bar.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "foos", + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should not return an not error if the directory doesn't exist", func() { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{Paths: []string{invalidDirectory}}) + Expect(err).NotTo(HaveOccurred()) + }) - It("should return an error if the directory doesn't exist", func(done Done) { - crds, err = InstallCRDs(env.Config, CRDInstallOptions{Paths: []string{"fake"}, ErrorIfPathMissing: true}) + It("should return an error if the directory doesn't exist", func() { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: 
[]string{invalidDirectory}, ErrorIfPathMissing: true, + }) Expect(err).To(HaveOccurred()) + }) - close(done) - }, 5) + It("should return an error if the file doesn't exist", func() { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{Paths: []string{ + filepath.Join(".", "testdata", "fake.yaml")}, ErrorIfPathMissing: true, + }) + Expect(err).To(HaveOccurred()) + }) - It("should return an error if the resource group version isn't found", func(done Done) { + It("should return an error if the resource group version isn't found", func() { // Wait for a CRD where the Group and Version don't exist err := WaitForCRDs(env.Config, - []*v1beta1.CustomResourceDefinition{ + []*apiextensionsv1.CustomResourceDefinition{ { - Spec: v1beta1.CustomResourceDefinitionSpec{ - Version: "v1", - Names: v1beta1.CustomResourceDefinitionNames{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ Plural: "notfound", }}, }, }, - CRDInstallOptions{maxTime: 50 * time.Millisecond, pollInterval: 15 * time.Millisecond}, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, ) Expect(err).To(HaveOccurred()) + }) - close(done) - }, 5) - - It("should return an error if the resource isn't found in the group version", func(done Done) { + It("should return an error if the resource isn't found in the group version", func() { crds, err = InstallCRDs(env.Config, CRDInstallOptions{ Paths: []string{"."}, }) Expect(err).NotTo(HaveOccurred()) // Wait for a CRD that doesn't exist, but the Group and Version do - err = WaitForCRDs(env.Config, []*v1beta1.CustomResourceDefinition{ + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ { - Spec: 
v1beta1.CustomResourceDefinitionSpec{ - Group: "qux.example.com", - Version: "v1beta1", - Names: v1beta1.CustomResourceDefinitionNames{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "qux.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ Plural: "bazs", }}, }, { - Spec: v1beta1.CustomResourceDefinitionSpec{ - Group: "bar.example.com", - Version: "v1beta1", - Names: v1beta1.CustomResourceDefinitionNames{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "bar.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ Plural: "fake", }}, }}, - CRDInstallOptions{maxTime: 50 * time.Millisecond, pollInterval: 15 * time.Millisecond}, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, ) Expect(err).To(HaveOccurred()) + }) + + It("should reinstall the CRDs if already present in the cluster", func(ctx SpecContext) { + + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{filepath.Join(".", "testdata")}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to find the CRDs + + crd := &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "foos.bar.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Foo")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "bazs.qux.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + 
Expect(crd.Spec.Names.Kind).To(Equal("Baz")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "captains.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Captain")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "firstmates.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("FirstMate")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "drivers.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Driver")) + + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "bar.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "foos", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "qux.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "bazs", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: 
apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "captains", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "firstmates", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "drivers", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + }, + { + Name: "v2", + Storage: false, + Served: true, + }, + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + + // Try to re-install the CRDs + + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{filepath.Join(".", "testdata")}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to find the CRDs + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "foos.bar.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Foo")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "bazs.qux.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Baz")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "captains.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Captain")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "firstmates.crew.example.com"}, crd) + 
Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("FirstMate")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "drivers.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Driver")) + + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "bar.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "foos", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "qux.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "bazs", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "captains", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: 
apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "firstmates", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "drivers", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + }, + { + Name: "v2", + Storage: false, + Served: true, + }, + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + }) + }) + + It("should set a working KubeConfig", func(ctx SpecContext) { + kubeconfigRESTConfig, err := clientcmd.RESTConfigFromKubeConfig(env.KubeConfig) + Expect(err).ToNot(HaveOccurred()) + kubeconfigClient, err := client.New(kubeconfigRESTConfig, client.Options{Scheme: s}) + Expect(err).NotTo(HaveOccurred()) + Expect(kubeconfigClient.List(ctx, &apiextensionsv1.CustomResourceDefinitionList{})).To(Succeed()) + }) + + It("should update CRDs if already present in the cluster", func(ctx SpecContext) { + + // Install only the CRDv1 multi-version example + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{filepath.Join(".", "testdata")}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to find the CRDs + + crd := &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "drivers.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Driver")) + Expect(len(crd.Spec.Versions)).To(BeEquivalentTo(2)) + + // Store resource version for comparison later on + firstRV := crd.ResourceVersion + + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "drivers", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: 
"v1", + Storage: true, + Served: true, + }, + { + Name: "v2", + Storage: false, + Served: true, + }, + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + + // Add one more version and update + _, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{filepath.Join(".", "testdata", "crdv1_updated")}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to find updated CRD + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "drivers.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Driver")) + Expect(len(crd.Spec.Versions)).To(BeEquivalentTo(3)) + Expect(crd.ResourceVersion).NotTo(BeEquivalentTo(firstRV)) + + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "drivers", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + }, + { + Name: "v2", + Storage: false, + Served: true, + }, + { + Name: "v3", + Storage: false, + Served: true, + }, + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + }) + + Describe("UninstallCRDs", func() { + It("should uninstall the CRDs from the cluster", func(ctx SpecContext) { + crds, err = InstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{validDirectory}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to find the CRDs + + crd := &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "foos.bar.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Foo")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + 
err = c.Get(ctx, types.NamespacedName{Name: "bazs.qux.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Baz")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "captains.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Captain")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "firstmates.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("FirstMate")) + + crd = &apiextensionsv1.CustomResourceDefinition{} + err = c.Get(ctx, types.NamespacedName{Name: "drivers.crew.example.com"}, crd) + Expect(err).NotTo(HaveOccurred()) + Expect(crd.Spec.Names.Kind).To(Equal("Driver")) + + err = WaitForCRDs(env.Config, []*apiextensionsv1.CustomResourceDefinition{ + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "bar.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "foos", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "qux.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "bazs", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: 
&apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "captains", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Served: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{}, + }, + }, + }, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "firstmates", + }}, + }, + { + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "crew.example.com", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "drivers", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Storage: true, + Served: true, + }, + { + Name: "v2", + Storage: false, + Served: true, + }, + }}, + }, + }, + CRDInstallOptions{MaxTime: 50 * time.Millisecond, PollInterval: 15 * time.Millisecond}, + ) + Expect(err).NotTo(HaveOccurred()) + + err = UninstallCRDs(env.Config, CRDInstallOptions{ + Paths: []string{validDirectory}, + }) + Expect(err).NotTo(HaveOccurred()) + + // Expect to NOT find the CRDs + + crds := []string{ + "foos.bar.example.com", + "bazs.qux.example.com", + "captains.crew.example.com", + "firstmates.crew.example.com", + "drivers.crew.example.com", + } + placeholder := &apiextensionsv1.CustomResourceDefinition{} + Eventually(func() bool { + for _, crd := range crds { + err = c.Get(ctx, types.NamespacedName{Name: crd}, placeholder) + notFound := err != nil && apierrors.IsNotFound(err) + if !notFound { + return false + } + } + return true + }, 20).Should(BeTrue()) + }) + }) + + Describe("Start", func() { + It("should raise an error on invalid dir when flag is enabled", func() { + env := &Environment{ErrorIfCRDPathMissing: true, CRDDirectoryPaths: []string{invalidDirectory}} + _, err := 
env.Start() + Expect(err).To(HaveOccurred()) + Expect(env.Stop()).To(Succeed()) + }) + + It("should not raise an error on invalid dir when flag is disabled", func() { + env := &Environment{ErrorIfCRDPathMissing: false, CRDDirectoryPaths: []string{invalidDirectory}} + _, err := env.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(env.Stop()).To(Succeed()) + }) + }) + + Describe("Stop", func() { + It("should cleanup webhook /tmp folder with no error when using existing cluster", func() { + env := &Environment{} + _, err := env.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(env.Stop()).To(Succeed()) - close(done) - }, 5) + // check if the /tmp/envtest-serving-certs-* dir doesnt exists any more + Expect(env.WebhookInstallOptions.LocalServingCertDir).ShouldNot(BeADirectory()) + }) }) }) diff --git a/pkg/envtest/ginkgo.go b/pkg/envtest/ginkgo.go deleted file mode 100644 index 8995687f98..0000000000 --- a/pkg/envtest/ginkgo.go +++ /dev/null @@ -1,11 +0,0 @@ -package envtest - -import ( - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" -) - -// NewlineReporter is Reporter that Prints a newline after the default Reporter output so that the results -// are correctly parsed by test automation. -// See issue https://github.com/jstemmer/go-junit-report/issues/31 -// It's re-exported here to avoid compatibility breakage/mass rewrites. -type NewlineReporter = printer.NewlineReporter diff --git a/pkg/envtest/helper.go b/pkg/envtest/helper.go new file mode 100644 index 0000000000..d3b52017d2 --- /dev/null +++ b/pkg/envtest/helper.go @@ -0,0 +1,69 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/client-go/kubernetes/scheme" +) + +var ( + crdScheme = scheme.Scheme +) + +// init is required to correctly initialize the crdScheme package variable. +func init() { + _ = apiextensionsv1.AddToScheme(crdScheme) +} + +// mergePaths merges two string slices containing paths. +// This function makes no guarantees about order of the merged slice. +func mergePaths(s1, s2 []string) []string { + m := make(map[string]struct{}) + for _, s := range s1 { + m[s] = struct{}{} + } + for _, s := range s2 { + m[s] = struct{}{} + } + merged := make([]string, len(m)) + i := 0 + for key := range m { + merged[i] = key + i++ + } + return merged +} + +// mergeCRDs merges two CRD slices using their names. +// This function makes no guarantees about order of the merged slice. 
+func mergeCRDs(s1, s2 []*apiextensionsv1.CustomResourceDefinition) []*apiextensionsv1.CustomResourceDefinition { + m := make(map[string]*apiextensionsv1.CustomResourceDefinition) + for _, obj := range s1 { + m[obj.GetName()] = obj + } + for _, obj := range s2 { + m[obj.GetName()] = obj + } + merged := make([]*apiextensionsv1.CustomResourceDefinition, len(m)) + i := 0 + for _, obj := range m { + merged[i] = obj.DeepCopy() + i++ + } + return merged +} diff --git a/pkg/envtest/komega/OWNERS b/pkg/envtest/komega/OWNERS new file mode 100644 index 0000000000..45f63b0e2e --- /dev/null +++ b/pkg/envtest/komega/OWNERS @@ -0,0 +1,13 @@ +approvers: + - controller-runtime-admins + - controller-runtime-maintainers + - controller-runtime-approvers + - schrej + - JoelSpeed +reviewers: + - controller-runtime-admins + - controller-runtime-maintainers + - controller-runtime-approvers + - controller-runtime-reviewers + - schrej + - JoelSpeed diff --git a/pkg/envtest/komega/default.go b/pkg/envtest/komega/default.go new file mode 100644 index 0000000000..dad1f551ae --- /dev/null +++ b/pkg/envtest/komega/default.go @@ -0,0 +1,102 @@ +package komega + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// defaultK is the Komega used by the package global functions. +var defaultK = &komega{ctx: context.Background()} + +// SetClient sets the client used by the package global functions. +func SetClient(c client.Client) { + defaultK.client = c +} + +// SetContext sets the context used by the package global functions. +func SetContext(c context.Context) { + defaultK.ctx = c +} + +func checkDefaultClient() { + if defaultK.client == nil { + panic("Default Komega's client is not set. Use SetClient to set it.") + } +} + +// Get returns a function that fetches a resource and returns the occurring error. +// It can be used with gomega.Eventually() like this +// +// deployment := appsv1.Deployment{ ... 
} +// gomega.Eventually(komega.Get(&deployment)).Should(gomega.Succeed()) +// +// By calling the returned function directly it can also be used with gomega.Expect(komega.Get(...)()).To(...) +func Get(obj client.Object) func() error { + checkDefaultClient() + return defaultK.Get(obj) +} + +// List returns a function that lists resources and returns the occurring error. +// It can be used with gomega.Eventually() like this +// +// deployments := v1.DeploymentList{ ... } +// gomega.Eventually(k.List(&deployments)).Should(gomega.Succeed()) +// +// By calling the returned function directly it can also be used as gomega.Expect(k.List(...)()).To(...) +func List(list client.ObjectList, opts ...client.ListOption) func() error { + checkDefaultClient() + return defaultK.List(list, opts...) +} + +// Update returns a function that fetches a resource, applies the provided update function and then updates the resource. +// It can be used with gomega.Eventually() like this: +// +// deployment := appsv1.Deployment{ ... } +// gomega.Eventually(k.Update(&deployment, func() { +// deployment.Spec.Replicas = 3 +// })).Should(gomega.Succeed()) +// +// By calling the returned function directly it can also be used as gomega.Expect(k.Update(...)()).To(...) +func Update(obj client.Object, f func(), opts ...client.UpdateOption) func() error { + checkDefaultClient() + return defaultK.Update(obj, f, opts...) +} + +// UpdateStatus returns a function that fetches a resource, applies the provided update function and then updates the resource's status. +// It can be used with gomega.Eventually() like this: +// +// deployment := appsv1.Deployment{ ... } +// gomega.Eventually(k.UpdateStatus(&deployment, func() { +// deployment.Status.AvailableReplicas = 1 +// })).Should(gomega.Succeed()) +// +// By calling the returned function directly it can also be used as gomega.Expect(k.UpdateStatus(...)()).To(...) 
+func UpdateStatus(obj client.Object, f func(), opts ...client.SubResourceUpdateOption) func() error { + checkDefaultClient() + return defaultK.UpdateStatus(obj, f, opts...) +} + +// Object returns a function that fetches a resource and returns the object. +// It can be used with gomega.Eventually() like this: +// +// deployment := appsv1.Deployment{ ... } +// gomega.Eventually(k.Object(&deployment)).Should(HaveField("Spec.Replicas", gomega.Equal(ptr.To(3)))) +// +// By calling the returned function directly it can also be used as gomega.Expect(k.Object(...)()).To(...) +func Object(obj client.Object) func() (client.Object, error) { + checkDefaultClient() + return defaultK.Object(obj) +} + +// ObjectList returns a function that fetches a resource and returns the object. +// It can be used with gomega.Eventually() like this: +// +// deployments := appsv1.DeploymentList{ ... } +// gomega.Eventually(k.ObjectList(&deployments)).Should(HaveField("Items", HaveLen(1))) +// +// By calling the returned function directly it can also be used as gomega.Expect(k.ObjectList(...)()).To(...) +func ObjectList(list client.ObjectList, opts ...client.ListOption) func() (client.ObjectList, error) { + checkDefaultClient() + return defaultK.ObjectList(list, opts...) +} diff --git a/pkg/envtest/komega/default_test.go b/pkg/envtest/komega/default_test.go new file mode 100644 index 0000000000..1a1de72cf3 --- /dev/null +++ b/pkg/envtest/komega/default_test.go @@ -0,0 +1,116 @@ +package komega + +import ( + "testing" + + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + +func TestDefaultGet(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + SetClient(fc) + + fetched := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + } + g.Eventually(Get(&fetched)).Should(Succeed()) + + g.Expect(*fetched.Spec.Replicas).To(BeEquivalentTo(5)) +} + +func TestDefaultList(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + SetClient(fc) + + list := appsv1.DeploymentList{} + g.Eventually(List(&list)).Should(Succeed()) + + g.Expect(list.Items).To(HaveLen(1)) + depl := exampleDeployment() + g.Expect(list.Items[0]).To(And( + HaveField("ObjectMeta.Name", Equal(depl.ObjectMeta.Name)), + HaveField("ObjectMeta.Namespace", Equal(depl.ObjectMeta.Namespace)), + )) +} + +func TestDefaultUpdate(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + SetClient(fc) + + updateDeployment := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Eventually(Update(&updateDeployment, func() { + updateDeployment.Annotations = map[string]string{"updated": "true"} + })).Should(Succeed()) + + fetched := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Expect(Object(&fetched)()).To(HaveField("ObjectMeta.Annotations", HaveKeyWithValue("updated", "true"))) +} + +func TestDefaultUpdateStatus(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + SetClient(fc) + + updateDeployment := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Eventually(UpdateStatus(&updateDeployment, func() { + updateDeployment.Status.AvailableReplicas = 1 + })).Should(Succeed()) + + fetched := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Expect(Object(&fetched)()).To(HaveField("Status.AvailableReplicas", BeEquivalentTo(1))) +} + +func TestDefaultObject(t *testing.T) { + g := 
NewWithT(t) + + fc := createFakeClient() + SetClient(fc) + + fetched := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + } + g.Eventually(Object(&fetched)).Should(And( + Not(BeNil()), + HaveField("Spec.Replicas", Equal(ptr.To(int32(5)))), + )) +} + +func TestDefaultObjectList(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + SetClient(fc) + + list := appsv1.DeploymentList{} + g.Eventually(ObjectList(&list)).Should(And( + Not(BeNil()), + HaveField("Items", And( + HaveLen(1), + ContainElement(HaveField("Spec.Replicas", Equal(ptr.To(int32(5))))), + )), + )) +} diff --git a/pkg/envtest/komega/equalobject.go b/pkg/envtest/komega/equalobject.go new file mode 100644 index 0000000000..a931c2718a --- /dev/null +++ b/pkg/envtest/komega/equalobject.go @@ -0,0 +1,297 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package komega + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp" + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" + "k8s.io/apimachinery/pkg/runtime" +) + +// These package variables hold pre-created commonly used options that can be used to reduce the manual work involved in +// identifying the paths that need to be compared for testing equality between objects. 
+var ( + // IgnoreAutogeneratedMetadata contains the paths for all the metadata fields that are commonly set by the + // client and APIServer. This is used as a MatchOption for situations when only user-provided metadata is relevant. + IgnoreAutogeneratedMetadata = IgnorePaths{ + "metadata.uid", + "metadata.generation", + "metadata.creationTimestamp", + "metadata.resourceVersion", + "metadata.managedFields", + "metadata.deletionGracePeriodSeconds", + "metadata.deletionTimestamp", + "metadata.selfLink", + "metadata.generateName", + } +) + +type diffPath struct { + types []string + json []string +} + +// equalObjectMatcher is a Gomega matcher used to establish equality between two Kubernetes runtime.Objects. +type equalObjectMatcher struct { + // original holds the object that will be used to Match. + original runtime.Object + + // diffPaths contains the paths that differ between two objects. + diffPaths []diffPath + + // options holds the options that identify what should and should not be matched. + options *EqualObjectOptions +} + +// EqualObject returns a Matcher for the passed Kubernetes runtime.Object with the passed Options. This function can be +// used as a Gomega Matcher in Gomega Assertions. +func EqualObject(original runtime.Object, opts ...EqualObjectOption) types.GomegaMatcher { + matchOptions := &EqualObjectOptions{} + matchOptions = matchOptions.ApplyOptions(opts) + + return &equalObjectMatcher{ + options: matchOptions, + original: original, + } +} + +// Match compares the current object to the passed object and returns true if the objects are the same according to +// the Matcher and MatchOptions. 
+func (m *equalObjectMatcher) Match(actual interface{}) (success bool, err error) { + // Nil checks required first here for: + // 1) Nil equality which returns true + // 2) One object nil which returns an error + actualIsNil := reflect.ValueOf(actual).IsNil() + originalIsNil := reflect.ValueOf(m.original).IsNil() + + if actualIsNil && originalIsNil { + return true, nil + } + if actualIsNil || originalIsNil { + return false, fmt.Errorf("can not compare an object with a nil. original %v , actual %v", m.original, actual) + } + + m.diffPaths = m.calculateDiff(actual) + return len(m.diffPaths) == 0, nil +} + +// FailureMessage returns a message comparing the full objects after an unexpected failure to match has occurred. +func (m *equalObjectMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("the following fields were expected to match but did not:\n%v\n%s", m.diffPaths, + format.Message(actual, "expected to match", m.original)) +} + +// NegatedFailureMessage returns a string stating that all fields matched, even though that was not expected. +func (m *equalObjectMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return "it was expected that some fields do not match, but all of them did" +} + +func (d diffPath) String() string { + return fmt.Sprintf("(%s/%s)", strings.Join(d.types, "."), strings.Join(d.json, ".")) +} + +// diffReporter is a custom recorder for cmp.Diff which records all paths that are +// different between two objects. +type diffReporter struct { + stack []cmp.PathStep + + diffPaths []diffPath +} + +func (r *diffReporter) PushStep(s cmp.PathStep) { + r.stack = append(r.stack, s) +} + +func (r *diffReporter) Report(res cmp.Result) { + if !res.Equal() { + r.diffPaths = append(r.diffPaths, r.currentPath()) + } +} + +// currentPath converts the current stack into string representations that match +// the IgnorePaths and MatchPaths syntax. 
+func (r *diffReporter) currentPath() diffPath { + p := diffPath{types: []string{""}, json: []string{""}} + for si, s := range r.stack[1:] { + switch s := s.(type) { + case cmp.StructField: + p.types = append(p.types, s.String()[1:]) + // fetch the type information from the parent struct. + // Note: si has an offset of 1 compared to r.stack as we loop over r.stack[1:], so we don't need -1 + field := r.stack[si].Type().Field(s.Index()) + p.json = append(p.json, strings.Split(field.Tag.Get("json"), ",")[0]) + case cmp.SliceIndex: + key := fmt.Sprintf("[%d]", s.Key()) + p.types[len(p.types)-1] += key + p.json[len(p.json)-1] += key + case cmp.MapIndex: + key := fmt.Sprintf("%v", s.Key()) + if strings.ContainsAny(key, ".[]/\\") { + key = fmt.Sprintf("[%s]", key) + p.types[len(p.types)-1] += key + p.json[len(p.json)-1] += key + } else { + p.types = append(p.types, key) + p.json = append(p.json, key) + } + } + } + // Empty strings were added as the first element. If they're still empty, remove them again. + if len(p.json) > 0 && len(p.json[0]) == 0 { + p.json = p.json[1:] + p.types = p.types[1:] + } + return p +} + +func (r *diffReporter) PopStep() { + r.stack = r.stack[:len(r.stack)-1] +} + +// calculateDiff calculates the difference between two objects and returns the +// paths of the fields that do not match. +func (m *equalObjectMatcher) calculateDiff(actual interface{}) []diffPath { + var original interface{} = m.original + // Remove the wrapping Object from unstructured.Unstructured to make comparison behave similar to + // regular objects. + if u, isUnstructured := actual.(runtime.Unstructured); isUnstructured { + actual = u.UnstructuredContent() + } + if u, ok := m.original.(runtime.Unstructured); ok { + original = u.UnstructuredContent() + } + r := diffReporter{} + cmp.Diff(original, actual, cmp.Reporter(&r)) + return filterDiffPaths(*m.options, r.diffPaths) +} + +// filterDiffPaths filters the diff paths using the paths in EqualObjectOptions. 
+func filterDiffPaths(opts EqualObjectOptions, paths []diffPath) []diffPath { + result := []diffPath{} + + for _, p := range paths { + if len(opts.matchPaths) > 0 && !hasAnyPathPrefix(p, opts.matchPaths) { + continue + } + if hasAnyPathPrefix(p, opts.ignorePaths) { + continue + } + + result = append(result, p) + } + + return result +} + +// hasPathPrefix compares the segments of a path. +func hasPathPrefix(path []string, prefix []string) bool { + for i, p := range prefix { + if i >= len(path) { + return false + } + // return false if a segment doesn't match + if path[i] != p && (i < len(prefix)-1 || !segmentHasPrefix(path[i], p)) { + return false + } + } + return true +} + +func segmentHasPrefix(s, prefix string) bool { + return len(s) >= len(prefix) && s[0:len(prefix)] == prefix && + // if it is a prefix match, make sure the next character is a [ for array/map access + (len(s) == len(prefix) || s[len(prefix)] == '[') +} + +// hasAnyPathPrefix returns true if path matches any of the path prefixes. +// It respects the name boundaries within paths, so 'ObjectMeta.Name' does not +// match 'ObjectMeta.Namespace' for example. +func hasAnyPathPrefix(path diffPath, prefixes [][]string) bool { + for _, prefix := range prefixes { + if hasPathPrefix(path.types, prefix) || hasPathPrefix(path.json, prefix) { + return true + } + } + return false +} + +// EqualObjectOption describes an Option that can be applied to a Matcher. +type EqualObjectOption interface { + // ApplyToEqualObjectMatcher applies this configuration to the given MatchOption. + ApplyToEqualObjectMatcher(options *EqualObjectOptions) +} + +// EqualObjectOptions holds the available types of EqualObjectOptions that can be applied to a Matcher. +type EqualObjectOptions struct { + ignorePaths [][]string + matchPaths [][]string +} + +// ApplyOptions adds the passed MatchOptions to the MatchOptions struct. 
+func (o *EqualObjectOptions) ApplyOptions(opts []EqualObjectOption) *EqualObjectOptions { + for _, opt := range opts { + opt.ApplyToEqualObjectMatcher(o) + } + return o +} + +// IgnorePaths instructs the Matcher to ignore given paths when computing a diff. +// Paths are written in a syntax similar to Go with a few special cases. Both types and +// json/yaml field names are supported. +// +// Regular Paths: +// * "ObjectMeta.Name" +// * "metadata.name" +// Arrays: +// * "metadata.ownerReferences[0].name" +// Maps, if they do not contain any of .[]/\: +// * "metadata.labels.something" +// Maps, if they contain any of .[]/\: +// * "metadata.labels[kubernetes.io/something]" +type IgnorePaths []string + +// ApplyToEqualObjectMatcher applies this configuration to the given MatchOptions. +func (i IgnorePaths) ApplyToEqualObjectMatcher(opts *EqualObjectOptions) { + for _, p := range i { + opts.ignorePaths = append(opts.ignorePaths, strings.Split(p, ".")) + } +} + +// MatchPaths instructs the Matcher to restrict its diff to the given paths. If empty the Matcher will look at all paths. +// Paths are written in a syntax similar to Go with a few special cases. Both types and +// json/yaml field names are supported. +// +// Regular Paths: +// * "ObjectMeta.Name" +// * "metadata.name" +// Arrays: +// * "metadata.ownerReferences[0].name" +// Maps, if they do not contain any of .[]/\: +// * "metadata.labels.something" +// Maps, if they contain any of .[]/\: +// * "metadata.labels[kubernetes.io/something]" +type MatchPaths []string + +// ApplyToEqualObjectMatcher applies this configuration to the given MatchOptions. 
+func (i MatchPaths) ApplyToEqualObjectMatcher(opts *EqualObjectOptions) {
+	for _, p := range i {
+		opts.matchPaths = append(opts.matchPaths, strings.Split(p, "."))
+	}
+}
diff --git a/pkg/envtest/komega/equalobject_test.go b/pkg/envtest/komega/equalobject_test.go
new file mode 100644
index 0000000000..9fe10d1779
--- /dev/null
+++ b/pkg/envtest/komega/equalobject_test.go
@@ -0,0 +1,662 @@
+package komega
+
+import (
+	"testing"
+
+	. "github.com/onsi/gomega"
+	appsv1 "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func TestEqualObjectMatcher(t *testing.T) {
+	cases := []struct {
+		name     string
+		original client.Object
+		modified client.Object
+		options  []EqualObjectOption
+		want     bool
+	}{
+		{
+			name: "succeed with equal objects",
+			original: &appsv1.Deployment{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test",
+				},
+			},
+			modified: &appsv1.Deployment{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test",
+				},
+			},
+			want: true,
+		},
+		{
+			name: "fail with non equal objects",
+			original: &appsv1.Deployment{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test",
+				},
+			},
+			modified: &appsv1.Deployment{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "somethingelse",
+				},
+			},
+			want: false,
+		},
+		{
+			name: "succeeds if ignored fields do not match",
+			original: &appsv1.Deployment{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   "test",
+					Labels: map[string]string{"somelabel": "somevalue"},
+					OwnerReferences: []metav1.OwnerReference{{
+						Name: "controller",
+					}},
+				},
+			},
+			modified: &appsv1.Deployment{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:   "somethingelse",
+					Labels: map[string]string{"somelabel": "anothervalue"},
+					OwnerReferences: []metav1.OwnerReference{{
+						Name: "another",
+					}},
+				},
+			},
+			want: true,
+			options: []EqualObjectOption{
+				IgnorePaths{
+					"ObjectMeta.Name",
+					"ObjectMeta.CreationTimestamp",
+					"ObjectMeta.Labels.somelabel",
+					
"ObjectMeta.OwnerReferences[0].Name", + "Spec.Template.ObjectMeta", + }, + }, + }, + { + name: "succeeds if ignored fields in json notation do not match", + original: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Labels: map[string]string{"somelabel": "somevalue"}, + OwnerReferences: []metav1.OwnerReference{{ + Name: "controller", + }}, + }, + }, + modified: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "somethingelse", + Labels: map[string]string{"somelabel": "anothervalue"}, + OwnerReferences: []metav1.OwnerReference{{ + Name: "another", + }}, + }, + }, + want: true, + options: []EqualObjectOption{ + IgnorePaths{ + "metadata.name", + "metadata.creationTimestamp", + "metadata.labels.somelabel", + "metadata.ownerReferences[0].name", + "spec.template.metadata", + }, + }, + }, + { + name: "succeeds if all allowed fields match, and some others do not", + original: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + }, + modified: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "special", + }, + }, + want: true, + options: []EqualObjectOption{ + MatchPaths{ + "ObjectMeta.Name", + }, + }, + }, + { + name: "works with unstructured.Unstructured", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "something", + "namespace": "test", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "somethingelse", + "namespace": "test", + }, + }, + }, + want: true, + options: []EqualObjectOption{ + IgnorePaths{ + "metadata.name", + }, + }, + }, + + // Test when objects are equal. 
+ { + name: "Equal field (spec) both in original and in modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + want: true, + }, + + { + name: "Equal nested field both in original and in modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + }, + }, + want: true, + }, + + // Test when there is a difference between the objects. + { + name: "Unequal field both in original and in modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "foo": "bar-changed", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + want: false, + }, + { + name: "Unequal nested field both in original and modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A-Changed", + }, + }, + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + }, + }, + want: false, + }, + + { + name: "Value of type map with different values", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": 
map[string]interface{}{ + "map": map[string]string{ + "A": "A-changed", + "B": "B", + // C missing + }, + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "map": map[string]string{ + "A": "A", + // B missing + "C": "C", + }, + }, + }, + }, + want: false, + }, + + { + name: "Value of type Array or Slice with same length but different values", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "slice": []string{ + "D", + "C", + "B", + }, + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "slice": []string{ + "A", + "B", + "C", + }, + }, + }, + }, + want: false, + }, + + // This tests specific behaviour in how Kubernetes marshals the zero value of metav1.Time{}. + { + name: "Creation timestamp set to empty value on both original and modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + "metadata": map[string]interface{}{ + "selfLink": "foo", + "creationTimestamp": metav1.Time{}, + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + "metadata": map[string]interface{}{ + "selfLink": "foo", + "creationTimestamp": metav1.Time{}, + }, + }, + }, + want: true, + }, + + // Cases to test diff when fields exist only in modified object. 
+ { + name: "Field only in modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + want: false, + }, + { + name: "Nested field only in modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + }, + }, + want: false, + }, + { + name: "Creation timestamp exists on modified but not on original", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + "metadata": map[string]interface{}{ + "selfLink": "foo", + "creationTimestamp": "2021-11-03T11:05:17Z", + }, + }, + }, + want: false, + }, + + // Test when fields exists only in the original object. 
+ { + name: "Field only in original", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + want: false, + }, + { + name: "Nested field only in original", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + want: false, + }, + { + name: "Creation timestamp exists on original but not on modified", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + "metadata": map[string]interface{}{ + "selfLink": "foo", + "creationTimestamp": "2021-11-03T11:05:17Z", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + + want: false, + }, + + // Test metadata fields computed by the system or in status are compared. 
+ { + name: "Unequal Metadata fields computed by the system or in status", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "selfLink": "foo", + "uid": "foo", + "resourceVersion": "foo", + "generation": "foo", + "managedFields": "foo", + }, + "status": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + want: false, + }, + { + name: "Unequal labels and annotations", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{ + "foo": "bar", + }, + "annotations": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + }, + want: false, + }, + + // Ignore fields MatchOption + { + name: "Unequal metadata fields ignored by IgnorePaths MatchOption", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test", + "selfLink": "foo", + "uid": "foo", + "resourceVersion": "foo", + "generation": "foo", + "managedFields": "foo", + }, + }, + }, + options: []EqualObjectOption{IgnoreAutogeneratedMetadata}, + want: true, + }, + { + name: "Unequal labels and annotations ignored by IgnorePaths MatchOption", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test", + "labels": map[string]interface{}{ + "foo": "bar", + }, + "annotations": map[string]interface{}{ + "foo": "bar", + }, + }, + }, + }, + options: 
[]EqualObjectOption{IgnorePaths{"metadata.labels", "metadata.annotations"}}, + want: true, + }, + { + name: "Ignore fields are not compared", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{}, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "controlPlaneEndpoint": map[string]interface{}{ + "host": "", + "port": 0, + }, + }, + }, + }, + options: []EqualObjectOption{IgnorePaths{"spec.controlPlaneEndpoint"}}, + want: true, + }, + { + name: "Not-ignored fields are still compared", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{}, + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + "ignored": "somevalue", + "superflous": "shouldcausefailure", + }, + }, + }, + }, + options: []EqualObjectOption{IgnorePaths{"metadata.annotations.ignored"}}, + want: false, + }, + + // MatchPaths MatchOption + { + name: "Unequal metadata fields not compared by setting MatchPaths MatchOption", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + }, + "metadata": map[string]interface{}{ + "selfLink": "foo", + "uid": "foo", + }, + }, + }, + options: []EqualObjectOption{MatchPaths{"spec"}}, + want: true, + }, + + // More tests + { + name: "No changes", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + "B": "B", + "C": "C", // C only in original + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", 
+ "B": "B", + }, + }, + }, + want: false, + }, + { + name: "Many changes", + original: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + // B missing + "C": "C", // C only in original + }, + }, + }, + modified: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{ + "A": "A", + "B": "B", + }, + }, + }, + want: false, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + g := NewWithT(t) + m := EqualObject(c.original, c.options...) + success, _ := m.Match(c.modified) + if !success { + t.Log(m.FailureMessage(c.modified)) + } + g.Expect(success).To(Equal(c.want)) + }) + } +} diff --git a/pkg/envtest/komega/interfaces.go b/pkg/envtest/komega/interfaces.go new file mode 100644 index 0000000000..b412e5c1bf --- /dev/null +++ b/pkg/envtest/komega/interfaces.go @@ -0,0 +1,76 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package komega + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Komega is a collection of utilites for writing tests involving a mocked +// Kubernetes API. +type Komega interface { + // Get returns a function that fetches a resource and returns the occurring error. + // It can be used with gomega.Eventually() like this + // deployment := appsv1.Deployment{ ... 
} + // gomega.Eventually(k.Get(&deployment)).To(gomega.Succeed()) + // By calling the returned function directly it can also be used with gomega.Expect(k.Get(...)()).To(...) + Get(client.Object) func() error + + // List returns a function that lists resources and returns the occurring error. + // It can be used with gomega.Eventually() like this + // deployments := v1.DeploymentList{ ... } + // gomega.Eventually(k.List(&deployments)).To(gomega.Succeed()) + // By calling the returned function directly it can also be used as gomega.Expect(k.List(...)()).To(...) + List(client.ObjectList, ...client.ListOption) func() error + + // Update returns a function that fetches a resource, applies the provided update function and then updates the resource. + // It can be used with gomega.Eventually() like this: + // deployment := appsv1.Deployment{ ... } + // gomega.Eventually(k.Update(&deployment, func() { + // deployment.Spec.Replicas = 3 + // })).To(gomega.Succeed()) + // By calling the returned function directly it can also be used as gomega.Expect(k.Update(...)()).To(...) + Update(client.Object, func(), ...client.UpdateOption) func() error + + // UpdateStatus returns a function that fetches a resource, applies the provided update function and then updates the resource's status. + // It can be used with gomega.Eventually() like this: + // deployment := appsv1.Deployment{ ... } + // gomega.Eventually(k.Update(&deployment, func() { + // deployment.Status.AvailableReplicas = 1 + // })).To(gomega.Succeed()) + // By calling the returned function directly it can also be used as gomega.Expect(k.UpdateStatus(...)()).To(...) + UpdateStatus(client.Object, func(), ...client.SubResourceUpdateOption) func() error + + // Object returns a function that fetches a resource and returns the object. + // It can be used with gomega.Eventually() like this: + // deployment := appsv1.Deployment{ ... 
} + // gomega.Eventually(k.Object(&deployment)).To(HaveField("Spec.Replicas", gomega.Equal(ptr.To(int32(3))))) + // By calling the returned function directly it can also be used as gomega.Expect(k.Object(...)()).To(...) + Object(client.Object) func() (client.Object, error) + + // ObjectList returns a function that fetches a resource and returns the object. + // It can be used with gomega.Eventually() like this: + // deployments := appsv1.DeploymentList{ ... } + // gomega.Eventually(k.ObjectList(&deployments)).To(HaveField("Items", HaveLen(1))) + // By calling the returned function directly it can also be used as gomega.Expect(k.ObjectList(...)()).To(...) + ObjectList(client.ObjectList, ...client.ListOption) func() (client.ObjectList, error) + + // WithContext returns a copy that uses the given context. + WithContext(context.Context) Komega +} diff --git a/pkg/envtest/komega/komega.go b/pkg/envtest/komega/komega.go new file mode 100644 index 0000000000..e19d9b5f0b --- /dev/null +++ b/pkg/envtest/komega/komega.go @@ -0,0 +1,117 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package komega + +import ( + "context" + + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// komega is a collection of utilites for writing tests involving a mocked +// Kubernetes API. 
+type komega struct { + ctx context.Context + client client.Client +} + +var _ Komega = &komega{} + +// New creates a new Komega instance with the given client. +func New(c client.Client) Komega { + return &komega{ + client: c, + ctx: context.Background(), + } +} + +// WithContext returns a copy that uses the given context. +func (k komega) WithContext(ctx context.Context) Komega { + k.ctx = ctx + return &k +} + +// Get returns a function that fetches a resource and returns the occurring error. +func (k *komega) Get(obj client.Object) func() error { + key := types.NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } + return func() error { + return k.client.Get(k.ctx, key, obj) + } +} + +// List returns a function that lists resources and returns the occurring error. +func (k *komega) List(obj client.ObjectList, opts ...client.ListOption) func() error { + return func() error { + return k.client.List(k.ctx, obj, opts...) + } +} + +// Update returns a function that fetches a resource, applies the provided update function and then updates the resource. +func (k *komega) Update(obj client.Object, updateFunc func(), opts ...client.UpdateOption) func() error { + key := types.NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } + return func() error { + err := k.client.Get(k.ctx, key, obj) + if err != nil { + return err + } + updateFunc() + return k.client.Update(k.ctx, obj, opts...) + } +} + +// UpdateStatus returns a function that fetches a resource, applies the provided update function and then updates the resource's status. +func (k *komega) UpdateStatus(obj client.Object, updateFunc func(), opts ...client.SubResourceUpdateOption) func() error { + key := types.NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } + return func() error { + err := k.client.Get(k.ctx, key, obj) + if err != nil { + return err + } + updateFunc() + return k.client.Status().Update(k.ctx, obj, opts...) 
+ } +} + +// Object returns a function that fetches a resource and returns the object. +func (k *komega) Object(obj client.Object) func() (client.Object, error) { + key := types.NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } + return func() (client.Object, error) { + err := k.client.Get(k.ctx, key, obj) + return obj, err + } +} + +// ObjectList returns a function that fetches a resource and returns the object. +func (k *komega) ObjectList(obj client.ObjectList, opts ...client.ListOption) func() (client.ObjectList, error) { + return func() (client.ObjectList, error) { + err := k.client.List(k.ctx, obj, opts...) + return obj, err + } +} diff --git a/pkg/envtest/komega/komega_test.go b/pkg/envtest/komega/komega_test.go new file mode 100644 index 0000000000..8867ac239a --- /dev/null +++ b/pkg/envtest/komega/komega_test.go @@ -0,0 +1,138 @@ +package komega + +import ( + "testing" + + _ "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func exampleDeployment() *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(5)), + }, + } +} + +func createFakeClient() client.Client { + return fakeclient.NewClientBuilder(). + WithObjects(exampleDeployment()). 
+ Build() +} + +func TestGet(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + k := New(fc) + + fetched := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + } + g.Eventually(k.Get(&fetched)).Should(Succeed()) + + g.Expect(*fetched.Spec.Replicas).To(BeEquivalentTo(5)) +} + +func TestList(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + k := New(fc) + + list := appsv1.DeploymentList{} + g.Eventually(k.List(&list)).Should(Succeed()) + + g.Expect(list.Items).To(HaveLen(1)) + depl := exampleDeployment() + g.Expect(list.Items[0]).To(And( + HaveField("ObjectMeta.Name", Equal(depl.ObjectMeta.Name)), + HaveField("ObjectMeta.Namespace", Equal(depl.ObjectMeta.Namespace)), + )) +} + +func TestUpdate(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + k := New(fc) + + updateDeployment := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Eventually(k.Update(&updateDeployment, func() { + updateDeployment.Annotations = map[string]string{"updated": "true"} + })).Should(Succeed()) + + fetched := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Expect(k.Object(&fetched)()).To(HaveField("ObjectMeta.Annotations", HaveKeyWithValue("updated", "true"))) +} + +func TestUpdateStatus(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + k := New(fc) + + updateDeployment := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Eventually(k.UpdateStatus(&updateDeployment, func() { + updateDeployment.Status.AvailableReplicas = 1 + })).Should(Succeed()) + + fetched := appsv1.Deployment{ + ObjectMeta: exampleDeployment().ObjectMeta, + } + g.Expect(k.Object(&fetched)()).To(HaveField("Status.AvailableReplicas", BeEquivalentTo(1))) +} + +func TestObject(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + k := New(fc) + + fetched := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: 
"test", + }, + } + g.Eventually(k.Object(&fetched)).Should(And( + Not(BeNil()), + HaveField("Spec.Replicas", Equal(ptr.To(int32(5)))), + )) +} + +func TestObjectList(t *testing.T) { + g := NewWithT(t) + + fc := createFakeClient() + k := New(fc) + + list := appsv1.DeploymentList{} + g.Eventually(k.ObjectList(&list)).Should(And( + Not(BeNil()), + HaveField("Items", And( + HaveLen(1), + ContainElement(HaveField("Spec.Replicas", Equal(ptr.To(int32(5))))), + )), + )) +} diff --git a/pkg/envtest/printer/ginkgo.go b/pkg/envtest/printer/ginkgo.go deleted file mode 100644 index 1435a1a435..0000000000 --- a/pkg/envtest/printer/ginkgo.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package printer contains setup for a friendlier Ginkgo printer that's easier -// to parse by test automation. -package printer - -import ( - "fmt" - - "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/types" -) - -var _ ginkgo.Reporter = NewlineReporter{} - -// NewlineReporter is Reporter that Prints a newline after the default Reporter output so that the results -// are correctly parsed by test automation. 
-// See issue https://github.com/jstemmer/go-junit-report/issues/31 -type NewlineReporter struct{} - -// SpecSuiteWillBegin implements ginkgo.Reporter -func (NewlineReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { -} - -// BeforeSuiteDidRun implements ginkgo.Reporter -func (NewlineReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {} - -// AfterSuiteDidRun implements ginkgo.Reporter -func (NewlineReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {} - -// SpecWillRun implements ginkgo.Reporter -func (NewlineReporter) SpecWillRun(specSummary *types.SpecSummary) {} - -// SpecDidComplete implements ginkgo.Reporter -func (NewlineReporter) SpecDidComplete(specSummary *types.SpecSummary) {} - -// SpecSuiteDidEnd Prints a newline between "35 Passed | 0 Failed | 0 Pending | 0 Skipped" and "--- PASS:" -func (NewlineReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { fmt.Printf("\n") } diff --git a/pkg/envtest/server.go b/pkg/envtest/server.go index f6801e5519..9bb81ed2ab 100644 --- a/pkg/envtest/server.go +++ b/pkg/envtest/server.go @@ -17,77 +17,162 @@ limitations under the License. 
package envtest import ( + "context" "fmt" "os" - "path/filepath" + "strings" "time" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/client/config" - "sigs.k8s.io/testing_frameworks/integration" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" ) var log = logf.RuntimeLog.WithName("test-env") -// Default binary path for test framework +/* +It's possible to override some defaults, by setting the following environment variables: +* USE_EXISTING_CLUSTER (boolean): if set to true, envtest will use an existing cluster +* TEST_ASSET_KUBE_APISERVER (string): path to the api-server binary to use +* TEST_ASSET_ETCD (string): path to the etcd binary to use +* TEST_ASSET_KUBECTL (string): path to the kubectl binary to use +* KUBEBUILDER_ASSETS (string): directory containing the binaries to use (api-server, etcd and kubectl). Defaults to /usr/local/kubebuilder/bin. +* KUBEBUILDER_CONTROLPLANE_START_TIMEOUT (string supported by time.ParseDuration): timeout for test control plane to start. Defaults to 20s. +* KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT (string supported by time.ParseDuration): timeout for test control plane to start. Defaults to 20s. 
+* KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT (boolean): if set to true, the control plane's stdout and stderr are attached to os.Stdout and os.Stderr +*/ const ( - envKubeAPIServerBin = "TEST_ASSET_KUBE_APISERVER" - envEtcdBin = "TEST_ASSET_ETCD" - envKubectlBin = "TEST_ASSET_KUBECTL" - envKubebuilderPath = "KUBEBUILDER_ASSETS" - envStartTimeout = "KUBEBUILDER_CONTROLPLANE_START_TIMEOUT" - envStopTimeout = "KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT" - defaultKubebuilderPath = "/usr/local/kubebuilder/bin" - StartTimeout = 60 - StopTimeout = 60 + envUseExistingCluster = "USE_EXISTING_CLUSTER" + envStartTimeout = "KUBEBUILDER_CONTROLPLANE_START_TIMEOUT" + envStopTimeout = "KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT" + envAttachOutput = "KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT" + StartTimeout = 60 + StopTimeout = 60 defaultKubebuilderControlPlaneStartTimeout = 20 * time.Second defaultKubebuilderControlPlaneStopTimeout = 20 * time.Second ) -func defaultAssetPath(binary string) string { - assetPath := os.Getenv(envKubebuilderPath) - if assetPath == "" { - assetPath = defaultKubebuilderPath - } - return filepath.Join(assetPath, binary) +// internal types we expose as part of our public API. +type ( + // ControlPlane is the re-exported ControlPlane type from the internal testing package. + ControlPlane = controlplane.ControlPlane -} + // APIServer is the re-exported APIServer from the internal testing package. + APIServer = controlplane.APIServer -// DefaultKubeAPIServerFlags are default flags necessary to bring up apiserver. -var DefaultKubeAPIServerFlags = []string{ - "--etcd-servers={{ if .EtcdURL }}{{ .EtcdURL.String }}{{ end }}", - "--cert-dir={{ .CertDir }}", - "--insecure-port={{ if .URL }}{{ .URL.Port }}{{ end }}", - "--insecure-bind-address={{ if .URL }}{{ .URL.Hostname }}{{ end }}", - "--secure-port={{ if .SecurePort }}{{ .SecurePort }}{{ end }}", - "--admission-control=AlwaysAdmit", -} + // Etcd is the re-exported Etcd from the internal testing package. 
+ Etcd = controlplane.Etcd + + // User represents a Kubernetes user to provision for auth purposes. + User = controlplane.User + + // AuthenticatedUser represets a Kubernetes user that's been provisioned. + AuthenticatedUser = controlplane.AuthenticatedUser + + // ListenAddr indicates the address and port that the API server should listen on. + ListenAddr = process.ListenAddr + + // SecureServing contains details describing how the API server should serve + // its secure endpoint. + SecureServing = controlplane.SecureServing + + // Authn is an authentication method that can be used with the control plane to + // provision users. + Authn = controlplane.Authn + + // Arguments allows configuring a process's flags. + Arguments = process.Arguments + + // Arg is a single flag with one or more values. + Arg = process.Arg +) + +var ( + // EmptyArguments constructs a new set of flags with nothing set. + // + // This is mostly useful for testing helper methods -- you'll want to call + // Configure on the APIServer (or etcd) to configure their arguments. + EmptyArguments = process.EmptyArguments +) // Environment creates a Kubernetes test environment that will start / stop the Kubernetes control plane and -// install extension APIs +// install extension APIs. type Environment struct { // ControlPlane is the ControlPlane including the apiserver and etcd - ControlPlane integration.ControlPlane + ControlPlane controlplane.ControlPlane + + // Scheme is used to determine if conversion webhooks should be enabled + // for a particular CRD / object. + // + // Conversion webhooks are going to be enabled if an object in the scheme + // implements Hub and Spoke conversions. + // + // If nil, scheme.Scheme is used. + Scheme *runtime.Scheme // Config can be used to talk to the apiserver. It's automatically // populated if not set using the standard controller-runtime config // loading. 
Config *rest.Config - // CRDs is a list of CRDs to install - CRDs []*apiextensionsv1beta1.CustomResourceDefinition + // KubeConfig provides []byte of a kubeconfig file to talk to the apiserver + // It's automatically populated if not set based on the `Config` + KubeConfig []byte + + // CRDInstallOptions are the options for installing CRDs. + CRDInstallOptions CRDInstallOptions + + // WebhookInstallOptions are the options for installing webhooks. + WebhookInstallOptions WebhookInstallOptions + + // ErrorIfCRDPathMissing provides an interface for the underlying + // CRDInstallOptions.ErrorIfPathMissing. It prevents silent failures + // for missing CRD paths. + ErrorIfCRDPathMissing bool + + // CRDs is a list of CRDs to install. + // If both this field and CRDs field in CRDInstallOptions are specified, the + // values are merged. + CRDs []*apiextensionsv1.CustomResourceDefinition // CRDDirectoryPaths is a list of paths containing CRD yaml or json configs. + // If both this field and Paths field in CRDInstallOptions are specified, the + // values are merged. CRDDirectoryPaths []string - // UseExisting indicates that this environments should use an + // DownloadBinaryAssets indicates that the envtest binaries should be downloaded. + // If BinaryAssetsDirectory is also set, it is used to store the downloaded binaries, + // otherwise a tmp directory is created. + DownloadBinaryAssets bool + + // DownloadBinaryAssetsVersion is the version of envtest binaries to download. + // Defaults to the latest stable version (i.e. excluding alpha / beta / RC versions). + DownloadBinaryAssetsVersion string + + // DownloadBinaryAssetsIndexURL is the index used to discover envtest binaries to download. + // Defaults to https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/HEAD/envtest-releases.yaml. + DownloadBinaryAssetsIndexURL string + + // BinaryAssetsDirectory is the path where the binaries required for the envtest are + // located in the local environment. 
This field can be overridden by setting KUBEBUILDER_ASSETS. + // Set this field to SetupEnvtestDefaultBinaryAssetsDirectory() to share binaries with setup-envtest. + BinaryAssetsDirectory string + + // UseExistingCluster indicates that this environments should use an // existing kubeconfig, instead of trying to stand up a new control plane. // This is useful in cases that need aggregated API servers and the like. - UseExistingCluster bool + UseExistingCluster *bool // ControlPlaneStartTimeout is the maximum duration each controlplane component // may take to start. It defaults to the KUBEBUILDER_CONTROLPLANE_START_TIMEOUT @@ -99,30 +184,36 @@ type Environment struct { // environment variable or 20 seconds if unspecified ControlPlaneStopTimeout time.Duration - // KubeAPIServerFlags is the set of flags passed while starting the api server. - KubeAPIServerFlags []string + // AttachControlPlaneOutput indicates if control plane output will be attached to os.Stdout and os.Stderr. + // Enable this to get more visibility of the testing control plane. + // It respect KUBEBUILDER_ATTACH_CONTROL_PLANE_OUTPUT environment variable. + AttachControlPlaneOutput bool } -// Stop stops a running server +// Stop stops a running server. +// Previously installed CRDs, as listed in CRDInstallOptions.CRDs, will be uninstalled +// if CRDInstallOptions.CleanUpAfterUse are set to true. func (te *Environment) Stop() error { - if te.UseExistingCluster { - return nil + if te.CRDInstallOptions.CleanUpAfterUse { + if err := UninstallCRDs(te.Config, te.CRDInstallOptions); err != nil { + return err + } } - return te.ControlPlane.Stop() -} -// getAPIServerFlags returns flags to be used with the Kubernetes API server. -func (te Environment) getAPIServerFlags() []string { - // Set default API server flags if not set. 
- if len(te.KubeAPIServerFlags) == 0 { - return DefaultKubeAPIServerFlags + if err := te.WebhookInstallOptions.Cleanup(); err != nil { + return err + } + + if te.useExistingCluster() { + return nil } - return te.KubeAPIServerFlags + + return te.ControlPlane.Stop() } -// Start starts a local Kubernetes server and updates te.ApiserverPort with the port it is listening on +// Start starts a local Kubernetes server and updates te.ApiserverPort with the port it is listening on. func (te *Environment) Start() (*rest.Config, error) { - if te.UseExistingCluster { + if te.useExistingCluster() { log.V(1).Info("using existing cluster") if te.Config == nil { // we want to allow people to pass in their own config, so @@ -132,55 +223,135 @@ func (te *Environment) Start() (*rest.Config, error) { var err error te.Config, err = config.GetConfig() if err != nil { - return nil, err + return nil, fmt.Errorf("unable to get configuration for existing cluster: %w", err) } } } else { - te.ControlPlane = integration.ControlPlane{} - te.ControlPlane.APIServer = &integration.APIServer{Args: te.getAPIServerFlags()} - te.ControlPlane.Etcd = &integration.Etcd{} + apiServer := te.ControlPlane.GetAPIServer() - if os.Getenv(envKubeAPIServerBin) == "" { - te.ControlPlane.APIServer.Path = defaultAssetPath("kube-apiserver") + if te.ControlPlane.Etcd == nil { + te.ControlPlane.Etcd = &controlplane.Etcd{} } - if os.Getenv(envEtcdBin) == "" { - te.ControlPlane.Etcd.Path = defaultAssetPath("etcd") + + if os.Getenv(envAttachOutput) == "true" { + te.AttachControlPlaneOutput = true } - if os.Getenv(envKubectlBin) == "" { - // we can't just set the path manually (it's behind a function), so set the environment variable instead - if err := os.Setenv(envKubectlBin, defaultAssetPath("kubectl")); err != nil { + if te.AttachControlPlaneOutput { + if apiServer.Out == nil { + apiServer.Out = os.Stdout + } + if apiServer.Err == nil { + apiServer.Err = os.Stderr + } + if te.ControlPlane.Etcd.Out == nil { + 
te.ControlPlane.Etcd.Out = os.Stdout + } + if te.ControlPlane.Etcd.Err == nil { + te.ControlPlane.Etcd.Err = os.Stderr + } + } + + if te.DownloadBinaryAssets { + apiServerPath, etcdPath, kubectlPath, err := downloadBinaryAssets(context.TODO(), + te.BinaryAssetsDirectory, te.DownloadBinaryAssetsVersion, te.DownloadBinaryAssetsIndexURL) + if err != nil { return nil, err } + + apiServer.Path = apiServerPath + te.ControlPlane.Etcd.Path = etcdPath + te.ControlPlane.KubectlPath = kubectlPath + } else { + apiServer.Path = process.BinPathFinder("kube-apiserver", te.BinaryAssetsDirectory) + te.ControlPlane.Etcd.Path = process.BinPathFinder("etcd", te.BinaryAssetsDirectory) + te.ControlPlane.KubectlPath = process.BinPathFinder("kubectl", te.BinaryAssetsDirectory) } if err := te.defaultTimeouts(); err != nil { - return nil, fmt.Errorf("failed to default controlplane timeouts: %v", err) + return nil, fmt.Errorf("failed to default controlplane timeouts: %w", err) } te.ControlPlane.Etcd.StartTimeout = te.ControlPlaneStartTimeout te.ControlPlane.Etcd.StopTimeout = te.ControlPlaneStopTimeout - te.ControlPlane.APIServer.StartTimeout = te.ControlPlaneStartTimeout - te.ControlPlane.APIServer.StopTimeout = te.ControlPlaneStopTimeout + apiServer.StartTimeout = te.ControlPlaneStartTimeout + apiServer.StopTimeout = te.ControlPlaneStopTimeout - log.V(1).Info("starting control plane", "api server flags", te.ControlPlane.APIServer.Args) + log.V(1).Info("starting control plane") if err := te.startControlPlane(); err != nil { - return nil, err + return nil, fmt.Errorf("unable to start control plane itself: %w", err) } // Create the *rest.Config for creating new clients - te.Config = &rest.Config{ - Host: te.ControlPlane.APIURL().Host, + baseConfig := &rest.Config{ // gotta go fast during tests -- we don't really care about overwhelming our test API server QPS: 1000.0, Burst: 2000.0, } + + adminInfo := User{Name: "admin", Groups: []string{"system:masters"}} + adminUser, err := 
te.ControlPlane.AddUser(adminInfo, baseConfig) + if err != nil { + return te.Config, fmt.Errorf("unable to provision admin user: %w", err) + } + te.Config = adminUser.Config() + } + + if len(te.KubeConfig) == 0 { + var err error + te.KubeConfig, err = controlplane.KubeConfigFromREST(te.Config) + if err != nil { + return nil, fmt.Errorf("unable to set KubeConfig field: %w", err) + } + } + + // Set the default scheme if nil. + if te.Scheme == nil { + te.Scheme = scheme.Scheme + } + + // If we are bringing etcd up for the first time, it can take some time for the + // default namespace to actually be created and seen as available to the apiserver + if err := te.waitForDefaultNamespace(te.Config); err != nil { + return nil, fmt.Errorf("default namespace didn't register within deadline: %w", err) + } + + // Call PrepWithoutInstalling to setup certificates first + // and have them available to patch CRD conversion webhook as well. + if err := te.WebhookInstallOptions.PrepWithoutInstalling(); err != nil { + return nil, err } log.V(1).Info("installing CRDs") - _, err := InstallCRDs(te.Config, CRDInstallOptions{ - Paths: te.CRDDirectoryPaths, - CRDs: te.CRDs, - }) - return te.Config, err + if te.CRDInstallOptions.Scheme == nil { + te.CRDInstallOptions.Scheme = te.Scheme + } + te.CRDInstallOptions.CRDs = mergeCRDs(te.CRDInstallOptions.CRDs, te.CRDs) + te.CRDInstallOptions.Paths = mergePaths(te.CRDInstallOptions.Paths, te.CRDDirectoryPaths) + te.CRDInstallOptions.ErrorIfPathMissing = te.ErrorIfCRDPathMissing + te.CRDInstallOptions.WebhookOptions = te.WebhookInstallOptions + crds, err := InstallCRDs(te.Config, te.CRDInstallOptions) + if err != nil { + return te.Config, fmt.Errorf("unable to install CRDs onto control plane: %w", err) + } + te.CRDs = crds + + log.V(1).Info("installing webhooks") + if err := te.WebhookInstallOptions.Install(te.Config); err != nil { + return nil, fmt.Errorf("unable to install webhooks onto control plane: %w", err) + } + return te.Config, nil +} + 
+// AddUser provisions a new user for connecting to this Environment. The user will +// have the specified name & belong to the specified groups. +// +// If you specify a "base" config, the returned REST Config will contain those +// settings as well as any required by the authentication method. You can use +// this to easily specify options like QPS. +// +// This is effectively a convinience alias for ControlPlane.AddUser -- see that +// for more low-level details. +func (te *Environment) AddUser(user User, baseConfig *rest.Config) (*AuthenticatedUser, error) { + return te.ControlPlane.AddUser(user, baseConfig) } func (te *Environment) startControlPlane() error { @@ -195,11 +366,25 @@ func (te *Environment) startControlPlane() error { log.Error(err, "unable to start the controlplane", "tries", numTries) } if numTries == maxRetries { - return fmt.Errorf("failed to start the controlplane. retried %d times: %v", numTries, err) + return fmt.Errorf("failed to start the controlplane. retried %d times: %w", numTries, err) } return nil } +func (te *Environment) waitForDefaultNamespace(config *rest.Config) error { + cs, err := client.New(config, client.Options{}) + if err != nil { + return fmt.Errorf("unable to create client: %w", err) + } + // It shouldn't take longer than 5s for the default namespace to be brought up in etcd + return wait.PollUntilContextTimeout(context.TODO(), time.Millisecond*50, time.Second*5, true, func(ctx context.Context) (bool, error) { + if err = cs.Get(ctx, types.NamespacedName{Name: "default"}, &corev1.Namespace{}); err != nil { + return false, nil //nolint:nilerr + } + return true, nil + }) +} + func (te *Environment) defaultTimeouts() error { var err error if te.ControlPlaneStartTimeout == 0 { @@ -225,3 +410,16 @@ func (te *Environment) defaultTimeouts() error { } return nil } + +func (te *Environment) useExistingCluster() bool { + if te.UseExistingCluster == nil { + return strings.ToLower(os.Getenv(envUseExistingCluster)) == "true" + } + 
return *te.UseExistingCluster +} + +// DefaultKubeAPIServerFlags exposes the default args for the APIServer so that +// you can use those to append your own additional arguments. +// +// Deprecated: use APIServer.Configure() instead. +var DefaultKubeAPIServerFlags = controlplane.APIServerDefaultArgs //nolint:staticcheck diff --git a/pkg/envtest/testdata/crds/examplecrd3.yaml b/pkg/envtest/testdata/crds/examplecrd3.yaml new file mode 100644 index 0000000000..479a6e5645 --- /dev/null +++ b/pkg/envtest/testdata/crds/examplecrd3.yaml @@ -0,0 +1,17 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: configs.foo.example.com +spec: + group: foo.example.com + names: + kind: Config + plural: configs + scope: Namespaced + versions: + - name: "v1beta1" + storage: true + served: true + schema: + openAPIV3Schema: + type: object diff --git a/pkg/envtest/testdata/crds/examplecrd_unserved.yaml b/pkg/envtest/testdata/crds/examplecrd_unserved.yaml new file mode 100644 index 0000000000..09fac4f080 --- /dev/null +++ b/pkg/envtest/testdata/crds/examplecrd_unserved.yaml @@ -0,0 +1,88 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + name: frigates.ship.example.com +spec: + group: ship.example.com + names: + kind: Frigate + listKind: FrigateList + plural: frigates + singular: frigate + scope: Namespaced + subresources: + status: {} + versions: + - name: v1 + served: false + storage: true + schema: + openAPIV3Schema: + description: Frigate is the Schema for the frigates API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FrigateSpec defines the desired state of Frigate + properties: + foo: + description: Foo is an example field of Frigate. Edit Frigate_types.go + to remove/update + type: string + type: object + status: + description: FrigateStatus defines the observed state of Frigate + type: object + type: object + - name: v1beta1 + served: false + storage: false + schema: + openAPIV3Schema: + description: Frigate is the Schema for the frigates API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: FrigateSpec defines the desired state of Frigate + properties: + foo: + description: Foo is an example field of Frigate. 
Edit Frigate_types.go + to remove/update + type: string + type: object + status: + description: FrigateStatus defines the observed state of Frigate + type: object + type: object +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/pkg/envtest/testdata/crdv1_original/example_multiversion_crd1.yaml b/pkg/envtest/testdata/crdv1_original/example_multiversion_crd1.yaml new file mode 100644 index 0000000000..5dead8186a --- /dev/null +++ b/pkg/envtest/testdata/crdv1_original/example_multiversion_crd1.yaml @@ -0,0 +1,61 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: drivers.crew.example.com +spec: + group: crew.example.com + names: + kind: Driver + plural: drivers + scope: "" + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object + - name: v2 + served: true + storage: false + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/pkg/envtest/testdata/crdv1_updated/example_multiversion_crd1_one_more_version.yaml b/pkg/envtest/testdata/crdv1_updated/example_multiversion_crd1_one_more_version.yaml new file mode 100644 index 0000000000..9eb0ec91a2 --- /dev/null +++ b/pkg/envtest/testdata/crdv1_updated/example_multiversion_crd1_one_more_version.yaml @@ -0,0 +1,83 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: drivers.crew.example.com +spec: + group: crew.example.com + names: + kind: Driver + plural: drivers + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object + - name: v2 + served: true + storage: false + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object + - name: v3 + served: true + storage: false + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/pkg/envtest/testdata/example_multiversion_crd1.yaml b/pkg/envtest/testdata/example_multiversion_crd1.yaml index 1999d1e02e..5bb2d73f69 100644 --- a/pkg/envtest/testdata/example_multiversion_crd1.yaml +++ b/pkg/envtest/testdata/example_multiversion_crd1.yaml @@ -1,4 +1,4 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: creationTimestamp: null @@ -8,352 +8,52 @@ spec: names: kind: Driver plural: drivers - scope: "" - validation: - openAPIV3Schema: - description: Driver is the Schema for the drivers API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - properties: - annotations: - additionalProperties: - type: string - description: 'Annotations is an unstructured key value map stored with - a resource that may be set by external tools to store and retrieve - arbitrary metadata. They are not queryable and should be preserved - when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' - type: object - clusterName: - description: The name of the cluster which the object belongs to. 
This - is used to distinguish resources with same name and namespace in different - clusters. This field is not set anywhere right now and apiserver is - going to ignore it if set in create or update request. - type: string - creationTimestamp: - description: "CreationTimestamp is a timestamp representing the server - time when this object was created. It is not guaranteed to be set - in happens-before order across separate operations. Clients may not - set this value. It is represented in RFC3339 form and is in UTC. \n - Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" - format: date-time - type: string - deletionGracePeriodSeconds: - description: Number of seconds allowed for this object to gracefully - terminate before it will be removed from the system. Only set when - deletionTimestamp is also set. May only be shortened. Read-only. - format: int64 - type: integer - deletionTimestamp: - description: "DeletionTimestamp is RFC 3339 date and time at which this - resource will be deleted. This field is set by the server when a graceful - deletion is requested by the user, and is not directly settable by - a client. The resource is expected to be deleted (no longer visible - from resource lists, and not reachable by name) after the time in - this field, once the finalizers list is empty. As long as the finalizers - list contains items, deletion is blocked. Once the deletionTimestamp - is set, this value may not be unset or be set further into the future, - although it may be shortened or the resource may be deleted prior - to this time. For example, a user may request that a pod is deleted - in 30 seconds. The Kubelet will react by sending a graceful termination - signal to the containers in the pod. After that 30 seconds, the Kubelet - will send a hard termination signal (SIGKILL) to the container and - after cleanup, remove the pod from the API. 
In the presence of network - partitions, this object may still exist after this timestamp, until - an administrator or automated process can determine the resource is - fully terminated. If not set, graceful deletion of the object has - not been requested. \n Populated by the system when a graceful deletion - is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" - format: date-time - type: string - finalizers: - description: Must be empty before the object is deleted from the registry. - Each entry is an identifier for the responsible component that will - remove the entry from the list. If the deletionTimestamp of the object - is non-nil, entries in this list can only be removed. - items: - type: string - type: array - generateName: - description: "GenerateName is an optional prefix, used by the server, - to generate a unique name ONLY IF the Name field has not been provided. - If this field is used, the name returned to the client will be different - than the name passed. This value will also be combined with a unique - suffix. The provided value has the same validation rules as the Name - field, and may be truncated by the length of the suffix required to - make the value unique on the server. \n If this field is specified - and the generated name exists, the server will NOT return a 409 - - instead, it will either return 201 Created or 500 with Reason ServerTimeout - indicating a unique name could not be found in the time allotted, - and the client should retry (optionally after the time indicated in - the Retry-After header). \n Applied only if Name is not specified. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency" - type: string - generation: - description: A sequence number representing a specific generation of - the desired state. Populated by the system. Read-only. 
- format: int64 - type: integer - initializers: - description: "An initializer is a controller which enforces some system - invariant at object creation time. This field is a list of initializers - that have not yet acted on this object. If nil or empty, this object - has been completely initialized. Otherwise, the object is considered - uninitialized and is hidden (in list/watch and get calls) from clients - that haven't explicitly asked to observe uninitialized objects. \n - When an object is created, the system will populate this list with - the current set of initializers. Only privileged users may set or - modify this list. Once it is empty, it may not be modified further - by any user." - properties: - pending: - description: Pending is a list of initializers that must execute - in order before this object is visible. When the last pending - initializer is removed, and no failing result is set, the initializers - struct will be set to nil and the object is considered as initialized - and visible to all clients. - items: - properties: - name: - description: name of the process that is responsible for initializing - this object. - type: string - required: - - name - type: object - type: array - result: - description: If result is set with the Failure field, the object - will be persisted to storage and then deleted, ensuring that other - clients can observe the deletion. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - code: - description: Suggested HTTP return code for this status, 0 if - not set. - format: int32 - type: integer - details: - description: Extended data associated with the reason. Each - reason may define its own extended details. 
This field is - optional and the data returned is not guaranteed to conform - to any schema except that defined by the reason type. - properties: - causes: - description: The Causes array includes more details associated - with the StatusReason failure. Not all StatusReasons may - provide detailed causes. - items: - properties: - field: - description: "The field of the resource that has caused - this error, as named by its JSON serialization. - May include dot and postfix notation for nested - attributes. Arrays are zero-indexed. Fields may - appear more than once in an array of causes due - to fields having multiple errors. Optional. \n Examples: - \ \"name\" - the field \"name\" on the current - resource \"items[0].name\" - the field \"name\" - on the first array entry in \"items\"" - type: string - message: - description: A human-readable description of the cause - of the error. This field may be presented as-is - to a reader. - type: string - reason: - description: A machine-readable description of the - cause of the error. If this value is empty there - is no information available. - type: string - type: object - type: array - group: - description: The group attribute of the resource associated - with the status StatusReason. - type: string - kind: - description: 'The kind attribute of the resource associated - with the status StatusReason. On some operations may differ - from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: The name attribute of the resource associated - with the status StatusReason (when there is a single name - which can be described). - type: string - retryAfterSeconds: - description: If specified, the time in seconds before the - operation should be retried. Some errors may indicate - the client must take an alternate action - for those errors - this field may indicate how long to wait before taking - the alternate action. 
- format: int32 - type: integer - uid: - description: 'UID of the resource. (when there is a single - resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - type: object - kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - message: - description: A human-readable description of the status of this - operation. - type: string - metadata: - description: 'Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - properties: - continue: - description: continue may be set if the user set a limit - on the number of items returned, and indicates that the - server has more data available. The value is opaque and - may be used to issue another request to the endpoint that - served this list to retrieve the next set of available - objects. Continuing a consistent list may not be possible - if the server configuration has changed or more than a - few minutes have passed. The resourceVersion field returned - when using this continue value will be identical to the - value in the first response, unless you have received - this token from an error message. - type: string - resourceVersion: - description: 'String that identifies the server''s internal - version of this object that can be used by clients to - determine when objects have changed. Value must be treated - as opaque by clients and passed unmodified back to the - server. Populated by the system. Read-only. More info: - https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' - type: string - selfLink: - description: selfLink is a URL representing this object. - Populated by the system. 
Read-only. - type: string - type: object - reason: - description: A machine-readable description of why this operation - is in the "Failure" status. If this value is empty there is - no information available. A Reason clarifies an HTTP status - code but does not override it. - type: string - status: - description: 'Status of the operation. One of: "Success" or - "Failure". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status' - type: string - type: object - required: - - pending - type: object - labels: - additionalProperties: - type: string - description: 'Map of string keys and values that can be used to organize - and categorize (scope and select) objects. May match selectors of - replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' - type: object - name: - description: 'Name must be unique within a namespace. Is required when - creating resources, although some resources may allow a client to - request the generation of an appropriate name automatically. Name - is primarily intended for creation idempotence and configuration definition. - Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - namespace: - description: "Namespace defines the space within each name must be unique. - An empty namespace is equivalent to the \"default\" namespace, but - \"default\" is the canonical representation. Not all objects are required - to be scoped to a namespace - the value of this field for those objects - will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: - http://kubernetes.io/docs/user-guide/namespaces" - type: string - ownerReferences: - description: List of objects depended by this object. If ALL objects - in the list have been deleted, this object will be garbage collected. - If this object is managed by a controller, then an entry in this list - will point to this controller, with the controller field set to true. 
- There cannot be more than one managing controller. - items: - properties: - apiVersion: - description: API version of the referent. - type: string - blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" - finalizer, then the owner cannot be deleted from the key-value - store until this reference is removed. Defaults to false. To - set this field, a user needs "delete" permission of the owner, - otherwise 422 (Unprocessable Entity) will be returned. - type: boolean - controller: - description: If true, this reference points to the managing controller. - type: boolean - kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - name: - description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' - type: string - uid: - description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' - type: string - required: - - apiVersion - - kind - - name - - uid - type: object - type: array - resourceVersion: - description: "An opaque value that represents the internal version of - this object that can be used by clients to determine when objects - have changed. May be used for optimistic concurrency, change detection, - and the watch operation on a resource or set of resources. Clients - must treat these values as opaque and passed unmodified back to the - server. They may only be valid for a particular resource or set of - resources. \n Populated by the system. Read-only. Value must be treated - as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency" - type: string - selfLink: - description: SelfLink is a URL representing this object. Populated by - the system. Read-only. - type: string - uid: - description: "UID is the unique in time and space value for this object. 
- It is typically generated by the server on successful creation of - a resource and is not allowed to change on PUT operations. \n Populated - by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" - type: string - type: object - spec: - type: object - status: - type: object - type: object + scope: Namespaced versions: - name: v1 served: true storage: true + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object - name: v2 served: true storage: false + schema: + openAPIV3Schema: + description: Driver is the Schema for the drivers API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + type: object + status: + type: object + type: object status: acceptedNames: kind: "" diff --git a/pkg/envtest/testdata/examplecrd.yaml b/pkg/envtest/testdata/examplecrd.yaml new file mode 100644 index 0000000000..f1638f8310 --- /dev/null +++ b/pkg/envtest/testdata/examplecrd.yaml @@ -0,0 +1,17 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bazs.qux.example.com +spec: + group: qux.example.com + names: + kind: Baz + plural: bazs + scope: Namespaced + versions: + - name: "v1beta1" + storage: true + served: true + schema: + openAPIV3Schema: + type: object diff --git a/pkg/envtest/testdata/examplecrd2.yaml b/pkg/envtest/testdata/examplecrd2.yaml deleted file mode 100644 index f66835ae20..0000000000 --- a/pkg/envtest/testdata/examplecrd2.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bazs.qux.example.com -spec: - group: qux.example.com - names: - kind: Baz - plural: bazs - scope: Namespaced - version: "v1beta1" \ No newline at end of file diff --git a/pkg/envtest/testdata/examplecrd1.yaml b/pkg/envtest/testdata/examplecrd_v1.yaml similarity index 51% rename from pkg/envtest/testdata/examplecrd1.yaml rename to pkg/envtest/testdata/examplecrd_v1.yaml index 9622eae9e2..e2bddbc528 100644 --- a/pkg/envtest/testdata/examplecrd1.yaml +++ b/pkg/envtest/testdata/examplecrd_v1.yaml @@ -1,4 +1,4 @@ -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: foos.bar.example.com @@ -8,4 +8,10 @@ spec: kind: Foo plural: foos scope: Namespaced - version: "v1beta1" \ No newline at end of file + versions: + - name: "v1" + storage: true + served: true + schema: + openAPIV3Schema: + type: object diff --git a/pkg/envtest/testdata/multiplecrds.yaml b/pkg/envtest/testdata/multiplecrds.yaml 
index 2148eca204..a855140ead 100644 --- a/pkg/envtest/testdata/multiplecrds.yaml +++ b/pkg/envtest/testdata/multiplecrds.yaml @@ -1,5 +1,5 @@ --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: captains.crew.example.com @@ -9,9 +9,15 @@ spec: kind: Captain plural: captains scope: Namespaced - version: "v1beta1" + versions: + - name: "v1beta1" + storage: true + served: true + schema: + openAPIV3Schema: + type: object --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: firstmates.crew.example.com @@ -21,5 +27,11 @@ spec: kind: FirstMate plural: firstmates scope: Namespaced - version: "v1beta1" ---- \ No newline at end of file + versions: + - name: "v1beta1" + storage: true + served: true + schema: + openAPIV3Schema: + type: object +--- diff --git a/pkg/envtest/testdata/webhooks/manifests.yaml b/pkg/envtest/testdata/webhooks/manifests.yaml new file mode 100644 index 0000000000..72437905cd --- /dev/null +++ b/pkg/envtest/testdata/webhooks/manifests.yaml @@ -0,0 +1,101 @@ +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + creationTimestamp: null + name: mutating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /mutate-v1 + failurePolicy: Fail + name: mpods.kb.io + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - pods +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + creationTimestamp: null + name: mutating-webhook-configuration2 +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /mutate-v1 + failurePolicy: Fail + name: mpods2.kb.io + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: 
+ - pods +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: validating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /validate-v1 + failurePolicy: Fail + name: vpods.kb.io + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - pods +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: validating-webhook-configuration2 +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /validate-v1 + failurePolicy: Fail + name: vpods2.kb.io + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - pods + diff --git a/pkg/envtest/webhook.go b/pkg/envtest/webhook.go new file mode 100644 index 0000000000..a6961bf7c6 --- /dev/null +++ b/pkg/envtest/webhook.go @@ -0,0 +1,449 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package envtest + +import ( + "context" + "fmt" + "net" + "os" + "path/filepath" + "time" + + admissionv1 "k8s.io/api/admissionregistration/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/yaml" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" +) + +// WebhookInstallOptions are the options for installing mutating or validating webhooks. +type WebhookInstallOptions struct { + // Paths is a list of paths to the directories or files containing the mutating or validating webhooks yaml or json configs. + Paths []string + + // MutatingWebhooks is a list of MutatingWebhookConfigurations to install + MutatingWebhooks []*admissionv1.MutatingWebhookConfiguration + + // ValidatingWebhooks is a list of ValidatingWebhookConfigurations to install + ValidatingWebhooks []*admissionv1.ValidatingWebhookConfiguration + + // IgnoreSchemeConvertible, will modify any CRD conversion webhook to use the local serving host and port, + // bypassing the need to have the types registered in the Scheme. This is useful for testing CRD conversion webhooks + // with unregistered or unstructured types. + IgnoreSchemeConvertible bool + + // IgnoreErrorIfPathMissing will ignore an error if a DirectoryPath does not exist when set to true + IgnoreErrorIfPathMissing bool + + // LocalServingHost is the host for serving webhooks on. + // it will be automatically populated + LocalServingHost string + + // LocalServingPort is the allocated port for serving webhooks on. 
+ // it will be automatically populated by a random available local port + LocalServingPort int + + // LocalServingCertDir is the allocated directory for serving certificates. + // it will be automatically populated by the local temp dir + LocalServingCertDir string + + // CAData is the CA that can be used to trust the serving certificates in LocalServingCertDir. + LocalServingCAData []byte + + // LocalServingHostExternalName is the hostname to use to reach the webhook server. + LocalServingHostExternalName string + + // MaxTime is the max time to wait + MaxTime time.Duration + + // PollInterval is the interval to check + PollInterval time.Duration +} + +// ModifyWebhookDefinitions modifies webhook definitions by: +// - applying CABundle based on the provided tinyca +// - if webhook client config uses service spec, it's removed and replaced with direct url. +func (o *WebhookInstallOptions) ModifyWebhookDefinitions() error { + caData := o.LocalServingCAData + + // generate host port. + hostPort, err := o.generateHostPort() + if err != nil { + return err + } + + for i := range o.MutatingWebhooks { + for j := range o.MutatingWebhooks[i].Webhooks { + updateClientConfig(&o.MutatingWebhooks[i].Webhooks[j].ClientConfig, hostPort, caData) + } + } + + for i := range o.ValidatingWebhooks { + for j := range o.ValidatingWebhooks[i].Webhooks { + updateClientConfig(&o.ValidatingWebhooks[i].Webhooks[j].ClientConfig, hostPort, caData) + } + } + return nil +} + +func updateClientConfig(cc *admissionv1.WebhookClientConfig, hostPort string, caData []byte) { + cc.CABundle = caData + if cc.Service != nil && cc.Service.Path != nil { + url := fmt.Sprintf("https://%s/%s", hostPort, *cc.Service.Path) + cc.URL = &url + cc.Service = nil + } +} + +func (o *WebhookInstallOptions) generateHostPort() (string, error) { + if o.LocalServingPort == 0 { + port, host, err := addr.Suggest(o.LocalServingHost) + if err != nil { + return "", fmt.Errorf("unable to grab random port for serving webhooks on: 
%w", err) + } + o.LocalServingPort = port + o.LocalServingHost = host + } + host := o.LocalServingHostExternalName + if host == "" { + host = o.LocalServingHost + } + return net.JoinHostPort(host, fmt.Sprintf("%d", o.LocalServingPort)), nil +} + +// PrepWithoutInstalling does the setup parts of Install (populating host-port, +// setting up CAs, etc), without actually truing to do anything with webhook +// definitions. This is largely useful for internal testing of +// controller-runtime, where we need a random host-port & caData for webhook +// tests, but may be useful in similar scenarios. +func (o *WebhookInstallOptions) PrepWithoutInstalling() error { + if err := o.setupCA(); err != nil { + return err + } + + if err := parseWebhook(o); err != nil { + return err + } + + return o.ModifyWebhookDefinitions() +} + +// Install installs specified webhooks to the API server. +func (o *WebhookInstallOptions) Install(config *rest.Config) error { + defaultWebhookOptions(o) + + if len(o.LocalServingCAData) == 0 { + if err := o.PrepWithoutInstalling(); err != nil { + return err + } + } + + if err := createWebhooks(config, o.MutatingWebhooks, o.ValidatingWebhooks); err != nil { + return err + } + + return WaitForWebhooks(config, o.MutatingWebhooks, o.ValidatingWebhooks, *o) +} + +// Cleanup cleans up cert directories. +func (o *WebhookInstallOptions) Cleanup() error { + if o.LocalServingCertDir != "" { + return os.RemoveAll(o.LocalServingCertDir) + } + return nil +} + +// defaultWebhookOptions sets the default values for Webhooks. +func defaultWebhookOptions(o *WebhookInstallOptions) { + if o.MaxTime == 0 { + o.MaxTime = defaultMaxWait + } + if o.PollInterval == 0 { + o.PollInterval = defaultPollInterval + } +} + +// WaitForWebhooks waits for the Webhooks to be available through API server. 
+func WaitForWebhooks(config *rest.Config, + mutatingWebhooks []*admissionv1.MutatingWebhookConfiguration, + validatingWebhooks []*admissionv1.ValidatingWebhookConfiguration, + options WebhookInstallOptions, +) error { + waitingFor := map[schema.GroupVersionKind]*sets.Set[string]{} + + for _, hook := range mutatingWebhooks { + h := hook + gvk, err := apiutil.GVKForObject(h, scheme.Scheme) + if err != nil { + return fmt.Errorf("unable to get gvk for MutatingWebhookConfiguration %s: %w", hook.GetName(), err) + } + + if _, ok := waitingFor[gvk]; !ok { + waitingFor[gvk] = &sets.Set[string]{} + } + waitingFor[gvk].Insert(h.GetName()) + } + + for _, hook := range validatingWebhooks { + h := hook + gvk, err := apiutil.GVKForObject(h, scheme.Scheme) + if err != nil { + return fmt.Errorf("unable to get gvk for ValidatingWebhookConfiguration %s: %w", hook.GetName(), err) + } + + if _, ok := waitingFor[gvk]; !ok { + waitingFor[gvk] = &sets.Set[string]{} + } + waitingFor[gvk].Insert(hook.GetName()) + } + + // Poll until all resources are found in discovery + p := &webhookPoller{config: config, waitingFor: waitingFor} + return wait.PollUntilContextTimeout(context.TODO(), options.PollInterval, options.MaxTime, true, p.poll) +} + +// poller checks if all the resources have been found in discovery, and returns false if not. +type webhookPoller struct { + // config is used to get discovery + config *rest.Config + + // waitingFor is the map of resources keyed by group version that have not yet been found in discovery + waitingFor map[schema.GroupVersionKind]*sets.Set[string] +} + +// poll checks if all the resources have been found in discovery, and returns false if not. 
+func (p *webhookPoller) poll(ctx context.Context) (done bool, err error) { + // Create a new clientset to avoid any client caching of discovery + c, err := client.New(p.config, client.Options{}) + if err != nil { + return false, err + } + + allFound := true + for gvk, names := range p.waitingFor { + if names.Len() == 0 { + delete(p.waitingFor, gvk) + continue + } + for _, name := range names.UnsortedList() { + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(gvk) + err := c.Get(context.Background(), client.ObjectKey{ + Namespace: "", + Name: name, + }, obj) + + if err == nil { + names.Delete(name) + } + + if apierrors.IsNotFound(err) { + allFound = false + } + if err != nil { + return false, err + } + } + } + return allFound, nil +} + +// setupCA creates CA for testing and writes them to disk. +func (o *WebhookInstallOptions) setupCA() error { + hookCA, err := certs.NewTinyCA() + if err != nil { + return fmt.Errorf("unable to set up webhook CA: %w", err) + } + + names := []string{"localhost", o.LocalServingHost, o.LocalServingHostExternalName} + hookCert, err := hookCA.NewServingCert(names...) 
+ if err != nil { + return fmt.Errorf("unable to set up webhook serving certs: %w", err) + } + + localServingCertsDir, err := os.MkdirTemp("", "envtest-serving-certs-") + o.LocalServingCertDir = localServingCertsDir + if err != nil { + return fmt.Errorf("unable to create directory for webhook serving certs: %w", err) + } + + certData, keyData, err := hookCert.AsBytes() + if err != nil { + return fmt.Errorf("unable to marshal webhook serving certs: %w", err) + } + + if err := os.WriteFile(filepath.Join(localServingCertsDir, "tls.crt"), certData, 0640); err != nil { + return fmt.Errorf("unable to write webhook serving cert to disk: %w", err) + } + if err := os.WriteFile(filepath.Join(localServingCertsDir, "tls.key"), keyData, 0640); err != nil { + return fmt.Errorf("unable to write webhook serving key to disk: %w", err) + } + + o.LocalServingCAData = certData + return err +} + +func createWebhooks(config *rest.Config, mutHooks []*admissionv1.MutatingWebhookConfiguration, valHooks []*admissionv1.ValidatingWebhookConfiguration) error { + cs, err := client.New(config, client.Options{}) + if err != nil { + return err + } + + // Create each webhook + for _, hook := range mutHooks { + log.V(1).Info("installing mutating webhook", "webhook", hook.GetName()) + if err := ensureCreated(cs, hook); err != nil { + return err + } + } + for _, hook := range valHooks { + log.V(1).Info("installing validating webhook", "webhook", hook.GetName()) + if err := ensureCreated(cs, hook); err != nil { + return err + } + } + return nil +} + +// ensureCreated creates or update object if already exists in the cluster. 
+func ensureCreated(cs client.Client, obj client.Object) error { + existing := obj.DeepCopyObject().(client.Object) + err := cs.Get(context.Background(), client.ObjectKey{Name: obj.GetName()}, existing) + switch { + case apierrors.IsNotFound(err): + if err := cs.Create(context.Background(), obj); err != nil { + return err + } + case err != nil: + return err + default: + log.V(1).Info("Webhook configuration already exists, updating", "webhook", obj.GetName()) + obj.SetResourceVersion(existing.GetResourceVersion()) + if err := cs.Update(context.Background(), obj); err != nil { + return err + } + } + return nil +} + +// parseWebhook reads the directories or files of Webhooks in options.Paths and adds the Webhook structs to options. +func parseWebhook(options *WebhookInstallOptions) error { + if len(options.Paths) > 0 { + for _, path := range options.Paths { + _, err := os.Stat(path) + if options.IgnoreErrorIfPathMissing && os.IsNotExist(err) { + continue // skip this path + } + if !options.IgnoreErrorIfPathMissing && os.IsNotExist(err) { + return err // treat missing path as error + } + mutHooks, valHooks, err := readWebhooks(path) + if err != nil { + return err + } + options.MutatingWebhooks = append(options.MutatingWebhooks, mutHooks...) + options.ValidatingWebhooks = append(options.ValidatingWebhooks, valHooks...) + } + } + return nil +} + +// readWebhooks reads the Webhooks from files and Unmarshals them into structs +// returns slice of mutating and validating webhook configurations. 
+func readWebhooks(path string) ([]*admissionv1.MutatingWebhookConfiguration, []*admissionv1.ValidatingWebhookConfiguration, error) { + // Get the webhook files + var files []string + var err error + log.V(1).Info("reading Webhooks from path", "path", path) + info, err := os.Stat(path) + if err != nil { + return nil, nil, err + } + if !info.IsDir() { + path, files = filepath.Dir(path), []string{info.Name()} + } else { + entries, err := os.ReadDir(path) + if err != nil { + return nil, nil, err + } + for _, e := range entries { + files = append(files, e.Name()) + } + } + + // file extensions that may contain Webhooks + resourceExtensions := sets.NewString(".json", ".yaml", ".yml") + + var mutHooks []*admissionv1.MutatingWebhookConfiguration + var valHooks []*admissionv1.ValidatingWebhookConfiguration + for _, file := range files { + // Only parse allowlisted file types + if !resourceExtensions.Has(filepath.Ext(file)) { + continue + } + + // Unmarshal Webhooks from file into structs + docs, err := readDocuments(filepath.Join(path, file)) + if err != nil { + return nil, nil, err + } + + for _, doc := range docs { + var generic metav1.PartialObjectMetadata + if err = yaml.Unmarshal(doc, &generic); err != nil { + return nil, nil, err + } + + const ( + admissionregv1 = "admissionregistration.k8s.io/v1" + ) + switch generic.Kind { + case "MutatingWebhookConfiguration": + if generic.APIVersion != admissionregv1 { + return nil, nil, fmt.Errorf("only v1 is supported right now for MutatingWebhookConfiguration (name: %s)", generic.Name) + } + hook := &admissionv1.MutatingWebhookConfiguration{} + if err := yaml.Unmarshal(doc, hook); err != nil { + return nil, nil, err + } + mutHooks = append(mutHooks, hook) + case "ValidatingWebhookConfiguration": + if generic.APIVersion != admissionregv1 { + return nil, nil, fmt.Errorf("only v1 is supported right now for ValidatingWebhookConfiguration (name: %s)", generic.Name) + } + hook := &admissionv1.ValidatingWebhookConfiguration{} + if 
err := yaml.Unmarshal(doc, hook); err != nil { + return nil, nil, err + } + valHooks = append(valHooks, hook) + default: + continue + } + } + + log.V(1).Info("read webhooks from file", "file", file) + } + return mutHooks, valHooks, nil +} diff --git a/pkg/envtest/webhook_test.go b/pkg/envtest/webhook_test.go new file mode 100644 index 0000000000..47550fa147 --- /dev/null +++ b/pkg/envtest/webhook_test.go @@ -0,0 +1,124 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package envtest + +import ( + "context" + "crypto/tls" + "path/filepath" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +var _ = Describe("Test", func() { + Describe("Webhook", func() { + It("should reject create request for webhook that rejects all requests", func(specCtx SpecContext) { + m, err := manager.New(env.Config, manager.Options{ + WebhookServer: webhook.NewServer(webhook.Options{ + Port: env.WebhookInstallOptions.LocalServingPort, + Host: env.WebhookInstallOptions.LocalServingHost, + CertDir: env.WebhookInstallOptions.LocalServingCertDir, + TLSOpts: []func(*tls.Config){func(config *tls.Config) {}}, + }), + }) // we need manager here just to leverage manager.SetFields + Expect(err).NotTo(HaveOccurred()) + server := m.GetWebhookServer() + server.Register("/failing", &webhook.Admission{Handler: &rejectingValidator{}}) + + ctx, cancel := context.WithCancel(specCtx) + go func() { + _ = server.Start(ctx) + }() + + c, err := client.New(env.Config, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + obj := &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, + }, + } + + Eventually(func() bool { + err = c.Create(ctx, obj) + return err != nil && strings.HasSuffix(err.Error(), "Always denied") && 
apierrors.ReasonForError(err) == metav1.StatusReasonForbidden + }, 1*time.Second).Should(BeTrue()) + + cancel() + }) + + It("should load webhooks from directory", func() { + installOptions := WebhookInstallOptions{ + Paths: []string{filepath.Join("testdata", "webhooks")}, + } + err := parseWebhook(&installOptions) + Expect(err).NotTo(HaveOccurred()) + Expect(installOptions.MutatingWebhooks).To(HaveLen(2)) + Expect(installOptions.ValidatingWebhooks).To(HaveLen(2)) + }) + + It("should load webhooks from files", func() { + installOptions := WebhookInstallOptions{ + Paths: []string{filepath.Join("testdata", "webhooks", "manifests.yaml")}, + } + err := parseWebhook(&installOptions) + Expect(err).NotTo(HaveOccurred()) + Expect(installOptions.MutatingWebhooks).To(HaveLen(2)) + Expect(installOptions.ValidatingWebhooks).To(HaveLen(2)) + }) + }) +}) + +type rejectingValidator struct { +} + +func (v *rejectingValidator) Handle(_ context.Context, _ admission.Request) admission.Response { + return admission.Denied("Always denied") +} diff --git a/pkg/event/event.go b/pkg/event/event.go index 6aa50bf301..82b1793f53 100644 --- a/pkg/event/event.go +++ b/pkg/event/event.go @@ -16,58 +16,60 @@ limitations under the License. package event -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) +import "sigs.k8s.io/controller-runtime/pkg/client" -// CreateEvent is an event where a Kubernetes object was created. CreateEvent should be generated +// CreateEvent is an event where a Kubernetes object was created. CreateEvent should be generated +// by a source.Source and transformed into a reconcile.Request by a handler.EventHandler. +type CreateEvent = TypedCreateEvent[client.Object] + +// UpdateEvent is an event where a Kubernetes object was updated. UpdateEvent should be generated +// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. 
+type UpdateEvent = TypedUpdateEvent[client.Object] + +// DeleteEvent is an event where a Kubernetes object was deleted. DeleteEvent should be generated // by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. -type CreateEvent struct { - // Meta is the ObjectMeta of the Kubernetes Type that was created - Meta metav1.Object +type DeleteEvent = TypedDeleteEvent[client.Object] +// GenericEvent is an event where the operation type is unknown (e.g. polling or event originating outside the cluster). +// GenericEvent should be generated by a source.Source and transformed into a reconcile.Request by an +// handler.EventHandler. +type GenericEvent = TypedGenericEvent[client.Object] + +// TypedCreateEvent is an event where a Kubernetes object was created. TypedCreateEvent should be generated +// by a source.Source and transformed into a reconcile.Request by an handler.TypedEventHandler. +type TypedCreateEvent[object any] struct { // Object is the object from the event - Object runtime.Object -} + Object object -// UpdateEvent is an event where a Kubernetes object was updated. UpdateEvent should be generated -// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. -type UpdateEvent struct { - // MetaOld is the ObjectMeta of the Kubernetes Type that was updated (before the update) - MetaOld metav1.Object + // IsInInitialList is true if the Create event was triggered by the initial list. + IsInInitialList bool +} +// TypedUpdateEvent is an event where a Kubernetes object was updated. TypedUpdateEvent should be generated +// by a source.Source and transformed into a reconcile.Request by an handler.TypedEventHandler. 
+type TypedUpdateEvent[object any] struct { // ObjectOld is the object from the event - ObjectOld runtime.Object - - // MetaNew is the ObjectMeta of the Kubernetes Type that was updated (after the update) - MetaNew metav1.Object + ObjectOld object // ObjectNew is the object from the event - ObjectNew runtime.Object + ObjectNew object } -// DeleteEvent is an event where a Kubernetes object was deleted. DeleteEvent should be generated -// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. -type DeleteEvent struct { - // Meta is the ObjectMeta of the Kubernetes Type that was deleted - Meta metav1.Object - +// TypedDeleteEvent is an event where a Kubernetes object was deleted. TypedDeleteEvent should be generated +// by a source.Source and transformed into a reconcile.Request by an handler.TypedEventHandler. +type TypedDeleteEvent[object any] struct { // Object is the object from the event - Object runtime.Object + Object object // DeleteStateUnknown is true if the Delete event was missed but we identified the object // as having been deleted. DeleteStateUnknown bool } -// GenericEvent is an event where the operation type is unknown (e.g. polling or event originating outside the cluster). -// GenericEvent should be generated by a source.Source and transformed into a reconcile.Request by an -// handler.EventHandler. -type GenericEvent struct { - // Meta is the ObjectMeta of a Kubernetes Type this event is for - Meta metav1.Object - +// TypedGenericEvent is an event where the operation type is unknown (e.g. polling or event originating outside the cluster). +// TypedGenericEvent should be generated by a source.Source and transformed into a reconcile.Request by an +// handler.TypedEventHandler. 
+type TypedGenericEvent[object any] struct { // Object is the object from the event - Object runtime.Object + Object object } diff --git a/pkg/finalizer/finalizer.go b/pkg/finalizer/finalizer.go new file mode 100644 index 0000000000..10c5645dbe --- /dev/null +++ b/pkg/finalizer/finalizer.go @@ -0,0 +1,79 @@ +/* +Copyright 2021 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package finalizer + +import ( + "context" + "fmt" + + kerrors "k8s.io/apimachinery/pkg/util/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +type finalizers map[string]Finalizer + +// Result struct holds information about what parts of an object were updated by finalizer(s). +type Result struct { + // Updated will be true if at least one of the object's non-status field + // was updated by some registered finalizer. + Updated bool + // StatusUpdated will be true if at least one of the object's status' fields + // was updated by some registered finalizer. + StatusUpdated bool +} + +// NewFinalizers returns the Finalizers interface. 
+func NewFinalizers() Finalizers { + return finalizers{} +} + +func (f finalizers) Register(key string, finalizer Finalizer) error { + if _, ok := f[key]; ok { + return fmt.Errorf("finalizer for key %q already registered", key) + } + f[key] = finalizer + return nil +} + +func (f finalizers) Finalize(ctx context.Context, obj client.Object) (Result, error) { + var ( + res Result + errList []error + ) + res.Updated = false + for key, finalizer := range f { + if dt := obj.GetDeletionTimestamp(); dt.IsZero() && !controllerutil.ContainsFinalizer(obj, key) { + controllerutil.AddFinalizer(obj, key) + res.Updated = true + } else if !dt.IsZero() && controllerutil.ContainsFinalizer(obj, key) { + finalizerRes, err := finalizer.Finalize(ctx, obj) + if err != nil { + // Even when the finalizer fails, it may need to signal to update the primary + // object (e.g. it may set a condition and need a status update). + res.Updated = res.Updated || finalizerRes.Updated + res.StatusUpdated = res.StatusUpdated || finalizerRes.StatusUpdated + errList = append(errList, fmt.Errorf("finalizer %q failed: %w", key, err)) + } else { + // If the finalizer succeeds, we remove the finalizer from the primary + // object's metadata, so we know it will need an update. + res.Updated = true + controllerutil.RemoveFinalizer(obj, key) + // The finalizer may have updated the status too. + res.StatusUpdated = res.StatusUpdated || finalizerRes.StatusUpdated + } + } + } + return res, kerrors.NewAggregate(errList) +} diff --git a/pkg/finalizer/finalizer_test.go b/pkg/finalizer/finalizer_test.go new file mode 100644 index 0000000000..c6848f6473 --- /dev/null +++ b/pkg/finalizer/finalizer_test.go @@ -0,0 +1,215 @@ +package finalizer + +import ( + "context" + "fmt" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type mockFinalizer struct { + result Result + err error +} + +func (f mockFinalizer) Finalize(context.Context, client.Object) (Result, error) { + return f.result, f.err +} + +func TestFinalizer(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Finalizer Suite") +} + +var _ = Describe("TestFinalizer", func() { + var err error + var pod *corev1.Pod + var finalizers Finalizers + var f mockFinalizer + BeforeEach(func() { + pod = &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{}, + } + finalizers = NewFinalizers() + f = mockFinalizer{} + }) + Describe("Register", func() { + It("successfully registers a finalizer", func() { + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer", f) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should fail when trying to register a finalizer that was already registered", func() { + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer", f) + Expect(err).ToNot(HaveOccurred()) + + // calling Register again with the same key should return an error + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer", f) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("already registered")) + + }) + }) + + Describe("Finalize", func() { + It("successfully finalizes and returns true for Updated when deletion timestamp is nil and finalizer does not exist", func(ctx SpecContext) { + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer", f) + Expect(err).ToNot(HaveOccurred()) + + pod.DeletionTimestamp = nil + pod.Finalizers = []string{} + + result, err := finalizers.Finalize(ctx, pod) + Expect(err).ToNot(HaveOccurred()) + Expect(result.Updated).To(BeTrue()) + // when deletion timestamp is nil and finalizer is not present, the registered finalizer would be added to the obj + Expect(pod.Finalizers).To(HaveLen(1)) + 
Expect(pod.Finalizers[0]).To(Equal("finalizers.sigs.k8s.io/testfinalizer")) + + }) + + It("successfully finalizes and returns true for Updated when deletion timestamp is not nil and the finalizer exists", func(ctx SpecContext) { + now := metav1.Now() + pod.DeletionTimestamp = &now + + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer", f) + Expect(err).ToNot(HaveOccurred()) + + pod.Finalizers = []string{"finalizers.sigs.k8s.io/testfinalizer"} + + result, err := finalizers.Finalize(ctx, pod) + Expect(err).ToNot(HaveOccurred()) + Expect(result.Updated).To(BeTrue()) + // finalizer will be removed from the obj upon successful finalization + Expect(pod.Finalizers).To(BeEmpty()) + }) + + It("should return no error and return false for Updated when deletion timestamp is nil and finalizer doesn't exist", func(ctx SpecContext) { + pod.DeletionTimestamp = nil + pod.Finalizers = []string{} + + result, err := finalizers.Finalize(ctx, pod) + Expect(err).ToNot(HaveOccurred()) + Expect(result.Updated).To(BeFalse()) + Expect(pod.Finalizers).To(BeEmpty()) + + }) + + It("should return no error and return false for Updated when deletion timestamp is not nil and the finalizer doesn't exist", func(ctx SpecContext) { + now := metav1.Now() + pod.DeletionTimestamp = &now + pod.Finalizers = []string{} + + result, err := finalizers.Finalize(ctx, pod) + Expect(err).ToNot(HaveOccurred()) + Expect(result.Updated).To(BeFalse()) + Expect(pod.Finalizers).To(BeEmpty()) + + }) + + It("successfully finalizes multiple finalizers and returns true for Updated when deletion timestamp is not nil and the finalizer exists", func(ctx SpecContext) { + now := metav1.Now() + pod.DeletionTimestamp = &now + + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer", f) + Expect(err).ToNot(HaveOccurred()) + + err = finalizers.Register("finalizers.sigs.k8s.io/newtestfinalizer", f) + Expect(err).ToNot(HaveOccurred()) + + pod.Finalizers = []string{"finalizers.sigs.k8s.io/testfinalizer", 
"finalizers.sigs.k8s.io/newtestfinalizer"} + + result, err := finalizers.Finalize(ctx, pod) + Expect(err).ToNot(HaveOccurred()) + Expect(result.Updated).To(BeTrue()) + Expect(result.StatusUpdated).To(BeFalse()) + Expect(pod.Finalizers).To(BeEmpty()) + }) + + It("should return result as false and a non-nil error", func(ctx SpecContext) { + now := metav1.Now() + pod.DeletionTimestamp = &now + pod.Finalizers = []string{"finalizers.sigs.k8s.io/testfinalizer"} + + f.result.Updated = false + f.result.StatusUpdated = false + f.err = fmt.Errorf("finalizer failed for %q", pod.Finalizers[0]) + + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer", f) + Expect(err).ToNot(HaveOccurred()) + + result, err := finalizers.Finalize(ctx, pod) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("finalizer failed")) + Expect(result.Updated).To(BeFalse()) + Expect(result.StatusUpdated).To(BeFalse()) + Expect(pod.Finalizers).To(HaveLen(1)) + Expect(pod.Finalizers[0]).To(Equal("finalizers.sigs.k8s.io/testfinalizer")) + }) + + It("should return expected result values and error values when registering multiple finalizers", func(ctx SpecContext) { + now := metav1.Now() + pod.DeletionTimestamp = &now + pod.Finalizers = []string{ + "finalizers.sigs.k8s.io/testfinalizer1", + "finalizers.sigs.k8s.io/testfinalizer2", + "finalizers.sigs.k8s.io/testfinalizer3", + } + + // registering multiple finalizers with different return values + // test for Updated as true, and nil error + f.result.Updated = true + f.result.StatusUpdated = false + f.err = nil + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer1", f) + Expect(err).ToNot(HaveOccurred()) + + result, err := finalizers.Finalize(ctx, pod) + Expect(err).ToNot(HaveOccurred()) + Expect(result.Updated).To(BeTrue()) + Expect(result.StatusUpdated).To(BeFalse()) + // `finalizers.sigs.k8s.io/testfinalizer1` will be removed from the list + // of finalizers, so length will be 2. 
+ Expect(pod.Finalizers).To(HaveLen(2)) + Expect(pod.Finalizers[0]).To(Equal("finalizers.sigs.k8s.io/testfinalizer2")) + Expect(pod.Finalizers[1]).To(Equal("finalizers.sigs.k8s.io/testfinalizer3")) + + // test for Updated and StatusUpdated as false, and non-nil error + f.result.Updated = false + f.result.StatusUpdated = false + f.err = fmt.Errorf("finalizer failed") + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer2", f) + Expect(err).ToNot(HaveOccurred()) + + result, err = finalizers.Finalize(ctx, pod) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("finalizer failed")) + Expect(result.Updated).To(BeFalse()) + Expect(result.StatusUpdated).To(BeFalse()) + Expect(pod.Finalizers).To(HaveLen(2)) + Expect(pod.Finalizers[0]).To(Equal("finalizers.sigs.k8s.io/testfinalizer2")) + Expect(pod.Finalizers[1]).To(Equal("finalizers.sigs.k8s.io/testfinalizer3")) + + // test for result as true, and non-nil error + f.result.Updated = true + f.result.StatusUpdated = true + f.err = fmt.Errorf("finalizer failed") + err = finalizers.Register("finalizers.sigs.k8s.io/testfinalizer3", f) + Expect(err).ToNot(HaveOccurred()) + + result, err = finalizers.Finalize(ctx, pod) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("finalizer failed")) + Expect(result.Updated).To(BeTrue()) + Expect(result.StatusUpdated).To(BeTrue()) + Expect(pod.Finalizers).To(HaveLen(2)) + Expect(pod.Finalizers[0]).To(Equal("finalizers.sigs.k8s.io/testfinalizer2")) + Expect(pod.Finalizers[1]).To(Equal("finalizers.sigs.k8s.io/testfinalizer3")) + }) + }) +}) diff --git a/pkg/finalizer/types.go b/pkg/finalizer/types.go new file mode 100644 index 0000000000..e3a002a935 --- /dev/null +++ b/pkg/finalizer/types.go @@ -0,0 +1,42 @@ +/* +Copyright 2021 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package finalizer + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Registerer holds Register that will check if a key is already registered +// and error out if it does; and if not registered, it will add the finalizer +// to the finalizers map as the value for the provided key. +type Registerer interface { + Register(key string, f Finalizer) error +} + +// Finalizer holds Finalize that will add/remove a finalizer based on the +// deletion timestamp being set and return an indication of whether the +// obj needs an update or not. +type Finalizer interface { + Finalize(context.Context, client.Object) (Result, error) +} + +// Finalizers implements Registerer and Finalizer to finalize all registered +// finalizers if the provided object has a deletion timestamp or set all +// registered finalizers if it does not. +type Finalizers interface { + Registerer + Finalizer +} diff --git a/pkg/handler/doc.go b/pkg/handler/doc.go index 3b5b79048d..e5fd177aff 100644 --- a/pkg/handler/doc.go +++ b/pkg/handler/doc.go @@ -21,7 +21,7 @@ Controller.Watch in order to generate and enqueue reconcile.Request work items. Generally, following premade event handlers should be sufficient for most use cases: -EventHandlers +EventHandlers: EnqueueRequestForObject - Enqueues a reconcile.Request containing the Name and Namespace of the object in the Event. This will cause the object that was the source of the Event (e.g. 
the created / deleted / updated object) to be diff --git a/pkg/handler/enqueue.go b/pkg/handler/enqueue.go index 2464c8b671..64cbe8a4d1 100644 --- a/pkg/handler/enqueue.go +++ b/pkg/handler/enqueue.go @@ -17,8 +17,12 @@ limitations under the License. package handler import ( + "context" + "reflect" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -26,66 +30,91 @@ import ( var enqueueLog = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForObject") +type empty struct{} + var _ EventHandler = &EnqueueRequestForObject{} // EnqueueRequestForObject enqueues a Request containing the Name and Namespace of the object that is the source of the Event. -// (e.g. the created / deleted / updated objects Name and Namespace). handler.EnqueueRequestForObject is used by almost all +// (e.g. the created / deleted / updated objects Name and Namespace). handler.EnqueueRequestForObject is used by almost all +// Controllers that have associated Resources (e.g. CRDs) to reconcile the associated Resource. +type EnqueueRequestForObject = TypedEnqueueRequestForObject[client.Object] + +// TypedEnqueueRequestForObject enqueues a Request containing the Name and Namespace of the object that is the source of the Event. +// (e.g. the created / deleted / updated objects Name and Namespace). handler.TypedEnqueueRequestForObject is used by almost all // Controllers that have associated Resources (e.g. CRDs) to reconcile the associated Resource. -type EnqueueRequestForObject struct{} +// +// TypedEnqueueRequestForObject is experimental and subject to future change. 
+type TypedEnqueueRequestForObject[object client.Object] struct{} -// Create implements EventHandler -func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { - if evt.Meta == nil { +// Create implements EventHandler. +func (e *TypedEnqueueRequestForObject[T]) Create(ctx context.Context, evt event.TypedCreateEvent[T], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + if isNil(evt.Object) { enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt) return } - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: evt.Meta.GetName(), - Namespace: evt.Meta.GetNamespace(), - }}) + + item := reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), + }} + + addToQueueCreate(q, evt, item) } -// Update implements EventHandler -func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { - if evt.MetaOld != nil { - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: evt.MetaOld.GetName(), - Namespace: evt.MetaOld.GetNamespace(), - }}) - } else { - enqueueLog.Error(nil, "UpdateEvent received with no old metadata", "event", evt) - } +// Update implements EventHandler. 
+func (e *TypedEnqueueRequestForObject[T]) Update(ctx context.Context, evt event.TypedUpdateEvent[T], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + switch { + case !isNil(evt.ObjectNew): + item := reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.ObjectNew.GetName(), + Namespace: evt.ObjectNew.GetNamespace(), + }} + + addToQueueUpdate(q, evt, item) + case !isNil(evt.ObjectOld): + item := reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.ObjectOld.GetName(), + Namespace: evt.ObjectOld.GetNamespace(), + }} - if evt.MetaNew != nil { - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: evt.MetaNew.GetName(), - Namespace: evt.MetaNew.GetNamespace(), - }}) - } else { - enqueueLog.Error(nil, "UpdateEvent received with no new metadata", "event", evt) + addToQueueUpdate(q, evt, item) + default: + enqueueLog.Error(nil, "UpdateEvent received with no metadata", "event", evt) } } -// Delete implements EventHandler -func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { - if evt.Meta == nil { +// Delete implements EventHandler. +func (e *TypedEnqueueRequestForObject[T]) Delete(ctx context.Context, evt event.TypedDeleteEvent[T], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + if isNil(evt.Object) { enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt) return } q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: evt.Meta.GetName(), - Namespace: evt.Meta.GetNamespace(), + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), }}) } -// Generic implements EventHandler -func (e *EnqueueRequestForObject) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { - if evt.Meta == nil { +// Generic implements EventHandler. 
+func (e *TypedEnqueueRequestForObject[T]) Generic(ctx context.Context, evt event.TypedGenericEvent[T], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + if isNil(evt.Object) { enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt) return } q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: evt.Meta.GetName(), - Namespace: evt.Meta.GetNamespace(), + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), }}) } + +func isNil(arg any) bool { + if v := reflect.ValueOf(arg); !v.IsValid() || ((v.Kind() == reflect.Ptr || + v.Kind() == reflect.Interface || + v.Kind() == reflect.Slice || + v.Kind() == reflect.Map || + v.Kind() == reflect.Chan || + v.Kind() == reflect.Func) && v.IsNil()) { + return true + } + return false +} diff --git a/pkg/handler/enqueue_mapped.go b/pkg/handler/enqueue_mapped.go index a60790242c..62d6728151 100644 --- a/pkg/handler/enqueue_mapped.go +++ b/pkg/handler/enqueue_mapped.go @@ -17,15 +17,25 @@ limitations under the License. package handler import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + "context" + "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) -var _ EventHandler = &EnqueueRequestsFromMapFunc{} +// MapFunc is the signature required for enqueueing requests from a generic function. +// This type is usually used with EnqueueRequestsFromMapFunc when registering an event handler. +type MapFunc = TypedMapFunc[client.Object, reconcile.Request] + +// TypedMapFunc is the signature required for enqueueing requests from a generic function. +// This type is usually used with EnqueueRequestsFromTypedMapFunc when registering an event handler. 
+// +// TypedMapFunc is experimental and subject to future change. +type TypedMapFunc[object any, request comparable] func(context.Context, object) []request // EnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection // of reconcile.Requests on each Event. The reconcile.Requests may be for an arbitrary set of objects @@ -37,69 +47,107 @@ var _ EventHandler = &EnqueueRequestsFromMapFunc{} // // For UpdateEvents which contain both a new and old object, the transformation function is run on both // objects and both sets of Requests are enqueue. -type EnqueueRequestsFromMapFunc struct { - // Mapper transforms the argument into a slice of keys to be reconciled - ToRequests Mapper -} - -// Create implements EventHandler -func (e *EnqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { - e.mapAndEnqueue(q, MapObject{Meta: evt.Meta, Object: evt.Object}) +func EnqueueRequestsFromMapFunc(fn MapFunc) EventHandler { + return TypedEnqueueRequestsFromMapFunc(fn) } -// Update implements EventHandler -func (e *EnqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { - e.mapAndEnqueue(q, MapObject{Meta: evt.MetaOld, Object: evt.ObjectOld}) - e.mapAndEnqueue(q, MapObject{Meta: evt.MetaNew, Object: evt.ObjectNew}) +// TypedEnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection +// of reconcile.Requests on each Event. The reconcile.Requests may be for an arbitrary set of objects +// defined by some user specified transformation of the source Event. (e.g. trigger Reconciler for a set of objects +// in response to a cluster resize event caused by adding or deleting a Node) +// +// TypedEnqueueRequestsFromMapFunc is frequently used to fan-out updates from one object to one or more other +// objects of a differing type. 
+// +// For TypedUpdateEvents which contain both a new and old object, the transformation function is run on both +// objects and both sets of Requests are enqueued. +// +// TypedEnqueueRequestsFromMapFunc is experimental and subject to future change. +func TypedEnqueueRequestsFromMapFunc[object any, request comparable](fn TypedMapFunc[object, request]) TypedEventHandler[object, request] { + return &enqueueRequestsFromMapFunc[object, request]{ + toRequests: fn, + objectImplementsClientObject: implementsClientObject[object](), + } } -// Delete implements EventHandler -func (e *EnqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { - e.mapAndEnqueue(q, MapObject{Meta: evt.Meta, Object: evt.Object}) -} +var _ EventHandler = &enqueueRequestsFromMapFunc[client.Object, reconcile.Request]{} -// Generic implements EventHandler -func (e *EnqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { - e.mapAndEnqueue(q, MapObject{Meta: evt.Meta, Object: evt.Object}) +type enqueueRequestsFromMapFunc[object any, request comparable] struct { + // Mapper transforms the argument into a slice of keys to be reconciled + toRequests TypedMapFunc[object, request] + objectImplementsClientObject bool } -func (e *EnqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInterface, object MapObject) { - for _, req := range e.ToRequests.Map(object) { - q.Add(req) +// Create implements EventHandler. +func (e *enqueueRequestsFromMapFunc[object, request]) Create( + ctx context.Context, + evt event.TypedCreateEvent[object], + q workqueue.TypedRateLimitingInterface[request], +) { + reqs := map[request]empty{} + + var lowPriority bool + if isPriorityQueue(q) && !isNil(evt.Object) { + if evt.IsInInitialList { + lowPriority = true + } } + e.mapAndEnqueue(ctx, q, evt.Object, reqs, lowPriority) } -// EnqueueRequestsFromMapFunc can inject fields into the mapper. - -// InjectFunc implements inject.Injector. 
-func (e *EnqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error { - if f == nil { - return nil +// Update implements EventHandler. +func (e *enqueueRequestsFromMapFunc[object, request]) Update( + ctx context.Context, + evt event.TypedUpdateEvent[object], + q workqueue.TypedRateLimitingInterface[request], +) { + var lowPriority bool + if e.objectImplementsClientObject && isPriorityQueue(q) && !isNil(evt.ObjectOld) && !isNil(evt.ObjectNew) { + lowPriority = any(evt.ObjectOld).(client.Object).GetResourceVersion() == any(evt.ObjectNew).(client.Object).GetResourceVersion() } - return f(e.ToRequests) + reqs := map[request]empty{} + e.mapAndEnqueue(ctx, q, evt.ObjectOld, reqs, lowPriority) + e.mapAndEnqueue(ctx, q, evt.ObjectNew, reqs, lowPriority) } -// Mapper maps an object to a collection of keys to be enqueued -type Mapper interface { - // Map maps an object - Map(MapObject) []reconcile.Request +// Delete implements EventHandler. +func (e *enqueueRequestsFromMapFunc[object, request]) Delete( + ctx context.Context, + evt event.TypedDeleteEvent[object], + q workqueue.TypedRateLimitingInterface[request], +) { + reqs := map[request]empty{} + e.mapAndEnqueue(ctx, q, evt.Object, reqs, false) } -// MapObject contains information from an event to be transformed into a Request. -type MapObject struct { - // Meta is the meta data for an object from an event. - Meta metav1.Object - - // Object is the object from an event. - Object runtime.Object +// Generic implements EventHandler. +func (e *enqueueRequestsFromMapFunc[object, request]) Generic( + ctx context.Context, + evt event.TypedGenericEvent[object], + q workqueue.TypedRateLimitingInterface[request], +) { + reqs := map[request]empty{} + e.mapAndEnqueue(ctx, q, evt.Object, reqs, false) } -var _ Mapper = ToRequestsFunc(nil) - -// ToRequestsFunc implements Mapper using a function. 
-type ToRequestsFunc func(MapObject) []reconcile.Request - -// Map implements Mapper -func (m ToRequestsFunc) Map(i MapObject) []reconcile.Request { - return m(i) +func (e *enqueueRequestsFromMapFunc[object, request]) mapAndEnqueue( + ctx context.Context, + q workqueue.TypedRateLimitingInterface[request], + o object, + reqs map[request]empty, + lowPriority bool, +) { + for _, req := range e.toRequests(ctx, o) { + _, ok := reqs[req] + if !ok { + if lowPriority { + q.(priorityqueue.PriorityQueue[request]).AddWithOpts(priorityqueue.AddOpts{ + Priority: ptr.To(LowPriority), + }, req) + } else { + q.Add(req) + } + reqs[req] = empty{} + } + } } diff --git a/pkg/handler/enqueue_owner.go b/pkg/handler/enqueue_owner.go index 17d512696c..e8fc8eb46e 100644 --- a/pkg/handler/enqueue_owner.go +++ b/pkg/handler/enqueue_owner.go @@ -17,6 +17,7 @@ limitations under the License. package handler import ( + "context" "fmt" "k8s.io/apimachinery/pkg/api/meta" @@ -25,15 +26,18 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) -var _ EventHandler = &EnqueueRequestForOwner{} +var _ EventHandler = &enqueueRequestForOwner[client.Object]{} -var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOwner") +var log = logf.RuntimeLog.WithName("eventhandler").WithName("enqueueRequestForOwner") + +// OwnerOption modifies an EnqueueRequestForOwner EventHandler. +type OwnerOption func(e enqueueRequestForOwnerInterface) // EnqueueRequestForOwner enqueues Requests for the Owners of an object. E.g. the object that created // the object that was the source of the Event. 
@@ -42,13 +46,52 @@ var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOw // // - a source.Kind Source with Type of Pod. // -// - a handler.EnqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and IsController set to true. -type EnqueueRequestForOwner struct { - // OwnerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. - OwnerType runtime.Object +// - a handler.enqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and OnlyControllerOwner set to true. +func EnqueueRequestForOwner(scheme *runtime.Scheme, mapper meta.RESTMapper, ownerType client.Object, opts ...OwnerOption) EventHandler { + return TypedEnqueueRequestForOwner[client.Object](scheme, mapper, ownerType, opts...) +} - // IsController if set will only look at the first OwnerReference with Controller: true. - IsController bool +// TypedEnqueueRequestForOwner enqueues Requests for the Owners of an object. E.g. the object that created +// the object that was the source of the Event. +// +// If a ReplicaSet creates Pods, users may reconcile the ReplicaSet in response to Pod Events using: +// +// - a source.Kind Source with Type of Pod. +// +// - a handler.typedEnqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and OnlyControllerOwner set to true. +// +// TypedEnqueueRequestForOwner is experimental and subject to future change. +func TypedEnqueueRequestForOwner[object client.Object](scheme *runtime.Scheme, mapper meta.RESTMapper, ownerType client.Object, opts ...OwnerOption) TypedEventHandler[object, reconcile.Request] { + e := &enqueueRequestForOwner[object]{ + ownerType: ownerType, + mapper: mapper, + } + if err := e.parseOwnerTypeGroupKind(scheme); err != nil { + panic(err) + } + for _, opt := range opts { + opt(e) + } + return WithLowPriorityWhenUnchanged(e) +} + +// OnlyControllerOwner if provided will only look at the first OwnerReference with Controller: true. 
+func OnlyControllerOwner() OwnerOption { + return func(e enqueueRequestForOwnerInterface) { + e.setIsController(true) + } +} + +type enqueueRequestForOwnerInterface interface { + setIsController(bool) +} + +type enqueueRequestForOwner[object client.Object] struct { + // ownerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. + ownerType runtime.Object + + // isController if set will only look at the first OwnerReference with Controller: true. + isController bool // groupKind is the cached Group and Kind from OwnerType groupKind schema.GroupKind @@ -57,71 +100,79 @@ type EnqueueRequestForOwner struct { mapper meta.RESTMapper } -// Create implements EventHandler -func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { - for _, req := range e.getOwnerReconcileRequest(evt.Meta) { - q.Add(req) - } +func (e *enqueueRequestForOwner[object]) setIsController(isController bool) { + e.isController = isController } -// Update implements EventHandler -func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { - for _, req := range e.getOwnerReconcileRequest(evt.MetaOld) { +// Create implements EventHandler. +func (e *enqueueRequestForOwner[object]) Create(ctx context.Context, evt event.TypedCreateEvent[object], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { q.Add(req) } - for _, req := range e.getOwnerReconcileRequest(evt.MetaNew) { +} + +// Update implements EventHandler. 
+func (e *enqueueRequestForOwner[object]) Update(ctx context.Context, evt event.TypedUpdateEvent[object], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.ObjectOld, reqs) + e.getOwnerReconcileRequest(evt.ObjectNew, reqs) + for req := range reqs { q.Add(req) } } -// Delete implements EventHandler -func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { - for _, req := range e.getOwnerReconcileRequest(evt.Meta) { +// Delete implements EventHandler. +func (e *enqueueRequestForOwner[object]) Delete(ctx context.Context, evt event.TypedDeleteEvent[object], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { q.Add(req) } } -// Generic implements EventHandler -func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { - for _, req := range e.getOwnerReconcileRequest(evt.Meta) { +// Generic implements EventHandler. +func (e *enqueueRequestForOwner[object]) Generic(ctx context.Context, evt event.TypedGenericEvent[object], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { q.Add(req) } } // parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. Returns false // if the OwnerType could not be parsed using the scheme. 
-func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { +func (e *enqueueRequestForOwner[object]) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { // Get the kinds of the type - kinds, _, err := scheme.ObjectKinds(e.OwnerType) + kinds, _, err := scheme.ObjectKinds(e.ownerType) if err != nil { - log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType)) + log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.ownerType)) return err } // Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions. if len(kinds) != 1 { - err := fmt.Errorf("Expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds) - log.Error(nil, "Expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds) + err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.ownerType, kinds) + log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.ownerType), "kinds", kinds) return err - } // Cache the Group and Kind for the OwnerType e.groupKind = schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind} return nil } -// getOwnerReconcileRequest looks at object and returns a slice of reconcile.Request to reconcile +// getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile // owners of object that match e.OwnerType. 
-func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object) []reconcile.Request { +func (e *enqueueRequestForOwner[object]) getOwnerReconcileRequest(obj metav1.Object, result map[reconcile.Request]empty) { // Iterate through the OwnerReferences looking for a match on Group and Kind against what was requested // by the user - var result []reconcile.Request - for _, ref := range e.getOwnersReferences(object) { + for _, ref := range e.getOwnersReferences(obj) { // Parse the Group out of the OwnerReference to compare it to what was parsed out of the requested OwnerType refGV, err := schema.ParseGroupVersion(ref.APIVersion) if err != nil { log.Error(err, "Could not parse OwnerReference APIVersion", "api version", ref.APIVersion) - return nil + return } // Compare the OwnerReference Group and Kind against the OwnerType Group and Kind specified by the user. @@ -134,55 +185,37 @@ func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object) Name: ref.Name, }} - // if owner is not namespaced then we should set the namespace to the empty + // if owner is not namespaced then we should not set the namespace mapping, err := e.mapper.RESTMapping(e.groupKind, refGV.Version) if err != nil { log.Error(err, "Could not retrieve rest mapping", "kind", e.groupKind) - return nil + return } if mapping.Scope.Name() != meta.RESTScopeNameRoot { - request.Namespace = object.GetNamespace() + request.Namespace = obj.GetNamespace() } - result = append(result, request) + result[request] = empty{} } } - - // Return the matches - return result } -// getOwnersReferences returns the OwnerReferences for an object as specified by the EnqueueRequestForOwner +// getOwnersReferences returns the OwnerReferences for an object as specified by the enqueueRequestForOwner // - if IsController is true: only take the Controller OwnerReference (if found) -// - if IsController is false: take all OwnerReferences -func (e *EnqueueRequestForOwner) getOwnersReferences(object 
metav1.Object) []metav1.OwnerReference { - if object == nil { +// - if IsController is false: take all OwnerReferences. +func (e *enqueueRequestForOwner[object]) getOwnersReferences(obj metav1.Object) []metav1.OwnerReference { + if obj == nil { return nil } // If not filtered as Controller only, then use all the OwnerReferences - if !e.IsController { - return object.GetOwnerReferences() + if !e.isController { + return obj.GetOwnerReferences() } // If filtered to a Controller, only take the Controller OwnerReference - if ownerRef := metav1.GetControllerOf(object); ownerRef != nil { + if ownerRef := metav1.GetControllerOf(obj); ownerRef != nil { return []metav1.OwnerReference{*ownerRef} } // No Controller OwnerReference found return nil } - -var _ inject.Scheme = &EnqueueRequestForOwner{} - -// InjectScheme is called by the Controller to provide a singleton scheme to the EnqueueRequestForOwner. -func (e *EnqueueRequestForOwner) InjectScheme(s *runtime.Scheme) error { - return e.parseOwnerTypeGroupKind(s) -} - -var _ inject.Mapper = &EnqueueRequestForOwner{} - -// InjectMapper is called by the Controller to provide the rest mapper used by the manager. -func (e *EnqueueRequestForOwner) InjectMapper(m meta.RESTMapper) error { - e.mapper = m - return nil -} diff --git a/pkg/handler/eventhandler.go b/pkg/handler/eventhandler.go index 840cf5b892..88510d29ed 100644 --- a/pkg/handler/eventhandler.go +++ b/pkg/handler/eventhandler.go @@ -17,13 +17,21 @@ limitations under the License. package handler import ( + "context" + "reflect" + "time" + "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue" "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // EventHandler enqueues reconcile.Requests in response to events (e.g. Pod Create). 
EventHandlers map an Event // for one object to trigger Reconciles for either the same object or different objects - e.g. if there is an -// Event for object with type Foo (using source.KindSource) then reconcile one or more object(s) with type Bar. +// Event for object with type Foo (using source.Kind) then reconcile one or more object(s) with type Bar. // // Identical reconcile.Requests will be batched together through the queuing mechanism before reconcile is called. // @@ -33,72 +41,207 @@ import ( // * Use EnqueueRequestForOwner to reconcile the owner of the object the event is for // - do this for events for the types the Controller creates. (e.g. ReplicaSets created by a Deployment Controller) // -// * Use EnqueueRequestFromMapFunc to transform an event for an object to a reconcile of an object +// * Use EnqueueRequestsFromMapFunc to transform an event for an object to a reconcile of an object // of a different type - do this for events for types the Controller may be interested in, but doesn't create. // (e.g. If Foo responds to cluster size events, map Node events to Foo objects.) // // Unless you are implementing your own EventHandler, you can ignore the functions on the EventHandler interface. // Most users shouldn't need to implement their own EventHandler. -type EventHandler interface { - // Create is called in response to an create event - e.g. Pod Creation. - Create(event.CreateEvent, workqueue.RateLimitingInterface) +type EventHandler = TypedEventHandler[client.Object, reconcile.Request] + +// TypedEventHandler enqueues reconcile.Requests in response to events (e.g. Pod Create). TypedEventHandlers map an Event +// for one object to trigger Reconciles for either the same object or different objects - e.g. if there is an +// Event for object with type Foo (using source.Kind) then reconcile one or more object(s) with type Bar. +// +// Identical reconcile.Requests will be batched together through the queuing mechanism before reconcile is called. 
+// +// * Use TypedEnqueueRequestForObject to reconcile the object the event is for +// - do this for events for the type the Controller Reconciles. (e.g. Deployment for a Deployment Controller) +// +// * Use TypedEnqueueRequestForOwner to reconcile the owner of the object the event is for +// - do this for events for the types the Controller creates. (e.g. ReplicaSets created by a Deployment Controller) +// +// * Use TypedEnqueueRequestsFromMapFunc to transform an event for an object to a reconcile of an object +// of a different type - do this for events for types the Controller may be interested in, but doesn't create. +// (e.g. If Foo responds to cluster size events, map Node events to Foo objects.) +// +// Unless you are implementing your own TypedEventHandler, you can ignore the functions on the TypedEventHandler interface. +// Most users shouldn't need to implement their own TypedEventHandler. +// +// TypedEventHandler is experimental and subject to future change. +type TypedEventHandler[object any, request comparable] interface { + // Create is called in response to a create event - e.g. Pod Creation. + Create(context.Context, event.TypedCreateEvent[object], workqueue.TypedRateLimitingInterface[request]) // Update is called in response to an update event - e.g. Pod Updated. - Update(event.UpdateEvent, workqueue.RateLimitingInterface) + Update(context.Context, event.TypedUpdateEvent[object], workqueue.TypedRateLimitingInterface[request]) // Delete is called in response to a delete event - e.g. Pod Deleted. - Delete(event.DeleteEvent, workqueue.RateLimitingInterface) + Delete(context.Context, event.TypedDeleteEvent[object], workqueue.TypedRateLimitingInterface[request]) // Generic is called in response to an event of an unknown type or a synthetic event triggered as a cron or // external trigger request - e.g. reconcile Autoscaling, or a Webhook. 
- Generic(event.GenericEvent, workqueue.RateLimitingInterface) + Generic(context.Context, event.TypedGenericEvent[object], workqueue.TypedRateLimitingInterface[request]) } var _ EventHandler = Funcs{} -// Funcs implements EventHandler. -type Funcs struct { +// Funcs implements eventhandler. +type Funcs = TypedFuncs[client.Object, reconcile.Request] + +// TypedFuncs implements eventhandler. +// +// TypedFuncs is experimental and subject to future change. +type TypedFuncs[object any, request comparable] struct { // Create is called in response to an add event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - CreateFunc func(event.CreateEvent, workqueue.RateLimitingInterface) + CreateFunc func(context.Context, event.TypedCreateEvent[object], workqueue.TypedRateLimitingInterface[request]) // Update is called in response to an update event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - UpdateFunc func(event.UpdateEvent, workqueue.RateLimitingInterface) + UpdateFunc func(context.Context, event.TypedUpdateEvent[object], workqueue.TypedRateLimitingInterface[request]) // Delete is called in response to a delete event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - DeleteFunc func(event.DeleteEvent, workqueue.RateLimitingInterface) + DeleteFunc func(context.Context, event.TypedDeleteEvent[object], workqueue.TypedRateLimitingInterface[request]) // GenericFunc is called in response to a generic event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. 
- GenericFunc func(event.GenericEvent, workqueue.RateLimitingInterface) + GenericFunc func(context.Context, event.TypedGenericEvent[object], workqueue.TypedRateLimitingInterface[request]) } -// Create implements EventHandler -func (h Funcs) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { +var typeForClientObject = reflect.TypeFor[client.Object]() + +func implementsClientObject[object any]() bool { + return reflect.TypeFor[object]().Implements(typeForClientObject) +} + +func isPriorityQueue[request comparable](q workqueue.TypedRateLimitingInterface[request]) bool { + _, ok := q.(priorityqueue.PriorityQueue[request]) + return ok +} + +// Create implements EventHandler. +func (h TypedFuncs[object, request]) Create(ctx context.Context, e event.TypedCreateEvent[object], q workqueue.TypedRateLimitingInterface[request]) { if h.CreateFunc != nil { - h.CreateFunc(e, q) + if !implementsClientObject[object]() || !isPriorityQueue(q) || isNil(e.Object) { + h.CreateFunc(ctx, e, q) + return + } + + wq := workqueueWithDefaultPriority[request]{ + // We already know that we have a priority queue, that event.Object implements + // client.Object and that its not nil + PriorityQueue: q.(priorityqueue.PriorityQueue[request]), + } + if e.IsInInitialList { + wq.priority = ptr.To(LowPriority) + } + h.CreateFunc(ctx, e, wq) } } -// Delete implements EventHandler -func (h Funcs) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { +// Delete implements EventHandler. +func (h TypedFuncs[object, request]) Delete(ctx context.Context, e event.TypedDeleteEvent[object], q workqueue.TypedRateLimitingInterface[request]) { if h.DeleteFunc != nil { - h.DeleteFunc(e, q) + h.DeleteFunc(ctx, e, q) } } -// Update implements EventHandler -func (h Funcs) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { +// Update implements EventHandler. 
+func (h TypedFuncs[object, request]) Update(ctx context.Context, e event.TypedUpdateEvent[object], q workqueue.TypedRateLimitingInterface[request]) { if h.UpdateFunc != nil { - h.UpdateFunc(e, q) + if !implementsClientObject[object]() || !isPriorityQueue(q) || isNil(e.ObjectOld) || isNil(e.ObjectNew) { + h.UpdateFunc(ctx, e, q) + return + } + + wq := workqueueWithDefaultPriority[request]{ + // We already know that we have a priority queue, that event.ObjectOld and ObjectNew implement + // client.Object and that they are not nil + PriorityQueue: q.(priorityqueue.PriorityQueue[request]), + } + if any(e.ObjectOld).(client.Object).GetResourceVersion() == any(e.ObjectNew).(client.Object).GetResourceVersion() { + wq.priority = ptr.To(LowPriority) + } + h.UpdateFunc(ctx, e, wq) } } -// Generic implements EventHandler -func (h Funcs) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { +// Generic implements EventHandler. +func (h TypedFuncs[object, request]) Generic(ctx context.Context, e event.TypedGenericEvent[object], q workqueue.TypedRateLimitingInterface[request]) { if h.GenericFunc != nil { - h.GenericFunc(e, q) + h.GenericFunc(ctx, e, q) + } +} + +// LowPriority is the priority set by WithLowPriorityWhenUnchanged +const LowPriority = -100 + +// WithLowPriorityWhenUnchanged reduces the priority of events stemming from the initial listwatch or from a resync if +// and only if a priorityqueue.PriorityQueue is used. If not, it does nothing. 
+func WithLowPriorityWhenUnchanged[object client.Object, request comparable](u TypedEventHandler[object, request]) TypedEventHandler[object, request] { + // TypedFuncs already implements this so just wrap + return TypedFuncs[object, request]{ + CreateFunc: u.Create, + UpdateFunc: u.Update, + DeleteFunc: u.Delete, + GenericFunc: u.Generic, + } +} + +type workqueueWithDefaultPriority[request comparable] struct { + priorityqueue.PriorityQueue[request] + priority *int +} + +func (w workqueueWithDefaultPriority[request]) Add(item request) { + w.PriorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: w.priority}, item) +} + +func (w workqueueWithDefaultPriority[request]) AddAfter(item request, after time.Duration) { + w.PriorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: w.priority, After: after}, item) +} + +func (w workqueueWithDefaultPriority[request]) AddRateLimited(item request) { + w.PriorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: w.priority, RateLimited: true}, item) +} + +func (w workqueueWithDefaultPriority[request]) AddWithOpts(o priorityqueue.AddOpts, items ...request) { + if o.Priority == nil { + o.Priority = w.priority + } + w.PriorityQueue.AddWithOpts(o, items...) 
+} + +// addToQueueCreate adds the reconcile.Request to the priorityqueue in the handler +// for Create requests if and only if the workqueue being used is of type priorityqueue.PriorityQueue[reconcile.Request] +func addToQueueCreate[T client.Object, request comparable](q workqueue.TypedRateLimitingInterface[request], evt event.TypedCreateEvent[T], item request) { + priorityQueue, isPriorityQueue := q.(priorityqueue.PriorityQueue[request]) + if !isPriorityQueue { + q.Add(item) + return + } + + var priority *int + if evt.IsInInitialList { + priority = ptr.To(LowPriority) + } + priorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: priority}, item) +} + +// addToQueueUpdate adds the reconcile.Request to the priorityqueue in the handler +// for Update requests if and only if the workqueue being used is of type priorityqueue.PriorityQueue[reconcile.Request] +func addToQueueUpdate[T client.Object, request comparable](q workqueue.TypedRateLimitingInterface[request], evt event.TypedUpdateEvent[T], item request) { + priorityQueue, isPriorityQueue := q.(priorityqueue.PriorityQueue[request]) + if !isPriorityQueue { + q.Add(item) + return + } + + var priority *int + if evt.ObjectOld.GetResourceVersion() == evt.ObjectNew.GetResourceVersion() { + priority = ptr.To(LowPriority) } + priorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: priority}, item) } diff --git a/pkg/handler/eventhandler_suite_test.go b/pkg/handler/eventhandler_suite_test.go index 0463acdd5d..3f6b17f337 100644 --- a/pkg/handler/eventhandler_suite_test.go +++ b/pkg/handler/eventhandler_suite_test.go @@ -19,7 +19,7 @@ package handler_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/envtest" @@ -29,14 +29,14 @@ import ( func TestEventhandler(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Eventhandler Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Eventhandler Suite") } var testenv *envtest.Environment var cfg *rest.Config var _ = BeforeSuite(func() { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) testenv = &envtest.Environment{} var err error diff --git a/pkg/handler/eventhandler_test.go b/pkg/handler/eventhandler_test.go index 13901955a4..2a7453f761 100644 --- a/pkg/handler/eventhandler_test.go +++ b/pkg/handler/eventhandler_test.go @@ -1,23 +1,26 @@ /* -Copyright 2018 The Kubernetes Authors. + Copyright 2018 The Kubernetes Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ package handler_test import ( - . 
"github.com/onsi/ginkgo" + "context" + "time" + + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" @@ -26,337 +29,269 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + "sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) var _ = Describe("Eventhandler", func() { - var q workqueue.RateLimitingInterface + var q workqueue.TypedRateLimitingInterface[reconcile.Request] var instance handler.EnqueueRequestForObject var pod *corev1.Pod var mapper meta.RESTMapper - t := true BeforeEach(func() { - q = controllertest.Queue{Interface: workqueue.New()} - instance = handler.EnqueueRequestForObject{} + q = &controllertest.Queue{TypedInterface: workqueue.NewTyped[reconcile.Request]()} pod = &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Namespace: "biz", Name: "baz"}, } Expect(cfg).NotTo(BeNil()) - var err error - mapper, err = apiutil.NewDiscoveryRESTMapper(cfg) + httpClient, err := rest.HTTPClientFor(cfg) + Expect(err).ShouldNot(HaveOccurred()) + mapper, err = apiutil.NewDynamicRESTMapper(cfg, httpClient) Expect(err).ShouldNot(HaveOccurred()) }) Describe("EnqueueRequestForObject", func() { - It("should enqueue a Request with the Name / Namespace of the object in the CreateEvent.", func(done Done) { + It("should enqueue a Request with the Name / Namespace of the object in the CreateEvent.", func(ctx SpecContext) { evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Create(evt, q) 
+ instance.Create(ctx, evt, q) Expect(q.Len()).To(Equal(1)) - i, _ := q.Get() - Expect(i).NotTo(BeNil()) - req, ok := i.(reconcile.Request) - Expect(ok).To(BeTrue()) + req, _ := q.Get() Expect(req.NamespacedName).To(Equal(types.NamespacedName{Namespace: "biz", Name: "baz"})) - - close(done) }) - It("should enqueue a Request with the Name / Namespace of the object in the DeleteEvent.", func(done Done) { + It("should enqueue a Request with the Name / Namespace of the object in the DeleteEvent.", func(ctx SpecContext) { evt := event.DeleteEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Delete(evt, q) + instance.Delete(ctx, evt, q) Expect(q.Len()).To(Equal(1)) - i, _ := q.Get() - Expect(i).NotTo(BeNil()) - req, ok := i.(reconcile.Request) - Expect(ok).To(BeTrue()) + req, _ := q.Get() Expect(req.NamespacedName).To(Equal(types.NamespacedName{Namespace: "biz", Name: "baz"})) - - close(done) }) - It("should enqueue a Request with the Name / Namespace of both objects in the UpdateEvent.", - func(done Done) { + It("should enqueue a Request with the Name / Namespace of one object in the UpdateEvent.", + func(ctx SpecContext) { newPod := pod.DeepCopy() newPod.Name = "baz2" newPod.Namespace = "biz2" evt := event.UpdateEvent{ ObjectOld: pod, - MetaOld: pod.GetObjectMeta(), ObjectNew: newPod, - MetaNew: newPod.GetObjectMeta(), } - instance.Update(evt, q) - Expect(q.Len()).To(Equal(2)) - - i, _ := q.Get() - Expect(i).NotTo(BeNil()) - req, ok := i.(reconcile.Request) - Expect(ok).To(BeTrue()) - Expect(req.NamespacedName).To(Equal(types.NamespacedName{Namespace: "biz", Name: "baz"})) + instance.Update(ctx, evt, q) + Expect(q.Len()).To(Equal(1)) - i, _ = q.Get() - Expect(i).NotTo(BeNil()) - req, ok = i.(reconcile.Request) - Expect(ok).To(BeTrue()) + req, _ := q.Get() Expect(req.NamespacedName).To(Equal(types.NamespacedName{Namespace: "biz2", Name: "baz2"})) - - close(done) }) - It("should enqueue a Request with the Name / Namespace of the object in the GenericEvent.", 
func(done Done) { + It("should enqueue a Request with the Name / Namespace of the object in the GenericEvent.", func(ctx SpecContext) { evt := event.GenericEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Generic(evt, q) + instance.Generic(ctx, evt, q) Expect(q.Len()).To(Equal(1)) - i, _ := q.Get() - Expect(i).NotTo(BeNil()) - req, ok := i.(reconcile.Request) - Expect(ok).To(BeTrue()) + req, _ := q.Get() Expect(req.NamespacedName).To(Equal(types.NamespacedName{Namespace: "biz", Name: "baz"})) - - close(done) }) - Context("for a runtime.Object without Metadata", func() { - It("should do nothing if the Metadata is missing for a CreateEvent.", func(done Done) { + Context("for a runtime.Object without Object", func() { + It("should do nothing if the Object is missing for a CreateEvent.", func(ctx SpecContext) { evt := event.CreateEvent{ - Object: pod, + Object: nil, } - instance.Create(evt, q) + instance.Create(ctx, evt, q) Expect(q.Len()).To(Equal(0)) - close(done) }) - It("should do nothing if the Metadata is missing for a UpdateEvent.", func(done Done) { + It("should do nothing if the Object is missing for a UpdateEvent.", func(ctx SpecContext) { newPod := pod.DeepCopy() newPod.Name = "baz2" newPod.Namespace = "biz2" evt := event.UpdateEvent{ ObjectNew: newPod, - MetaNew: newPod.GetObjectMeta(), - ObjectOld: pod, + ObjectOld: nil, } - instance.Update(evt, q) + instance.Update(ctx, evt, q) Expect(q.Len()).To(Equal(1)) - i, _ := q.Get() - Expect(i).NotTo(BeNil()) - req, ok := i.(reconcile.Request) - Expect(ok).To(BeTrue()) + req, _ := q.Get() Expect(req.NamespacedName).To(Equal(types.NamespacedName{Namespace: "biz2", Name: "baz2"})) - evt.MetaNew = nil - evt.MetaOld = pod.GetObjectMeta() - instance.Update(evt, q) + evt.ObjectNew = nil + evt.ObjectOld = pod + instance.Update(ctx, evt, q) Expect(q.Len()).To(Equal(1)) - i, _ = q.Get() - Expect(i).NotTo(BeNil()) - req, ok = i.(reconcile.Request) - Expect(ok).To(BeTrue()) + req, _ = q.Get() 
Expect(req.NamespacedName).To(Equal(types.NamespacedName{Namespace: "biz", Name: "baz"})) - - close(done) }) - It("should do nothing if the Metadata is missing for a DeleteEvent.", func(done Done) { + It("should do nothing if the Object is missing for a DeleteEvent.", func(ctx SpecContext) { evt := event.DeleteEvent{ - Object: pod, + Object: nil, } - instance.Delete(evt, q) + instance.Delete(ctx, evt, q) Expect(q.Len()).To(Equal(0)) - close(done) }) - It("should do nothing if the Metadata is missing for a GenericEvent.", func(done Done) { + It("should do nothing if the Object is missing for a GenericEvent.", func(ctx SpecContext) { evt := event.GenericEvent{ - Object: pod, + Object: nil, } - instance.Generic(evt, q) + instance.Generic(ctx, evt, q) Expect(q.Len()).To(Equal(0)) - close(done) }) }) }) Describe("EnqueueRequestsFromMapFunc", func() { - It("should enqueue a Request with the function applied to the CreateEvent.", func() { + It("should enqueue a Request with the function applied to the CreateEvent.", func(ctx SpecContext) { req := []reconcile.Request{} - instance := handler.EnqueueRequestsFromMapFunc{ - ToRequests: handler.ToRequestsFunc(func(a handler.MapObject) []reconcile.Request { - defer GinkgoRecover() - Expect(a.Meta).To(Equal(pod.GetObjectMeta())) - Expect(a.Object).To(Equal(pod)) - req = []reconcile.Request{ - { - NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}, - }, - { - NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}, - }, - } - return req - }), - } + instance := handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request { + defer GinkgoRecover() + Expect(a).To(Equal(pod)) + req = []reconcile.Request{ + { + NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}, + }, + { + NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}, + }, + } + return req + }) evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - 
instance.Create(evt, q) + instance.Create(ctx, evt, q) Expect(q.Len()).To(Equal(2)) - i, _ := q.Get() - Expect(i).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}})) - - i, _ = q.Get() - Expect(i).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}})) + i1, _ := q.Get() + i2, _ := q.Get() + Expect([]interface{}{i1, i2}).To(ConsistOf( + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}}, + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}}, + )) }) - It("should enqueue a Request with the function applied to the DeleteEvent.", func() { + It("should enqueue a Request with the function applied to the DeleteEvent.", func(ctx SpecContext) { req := []reconcile.Request{} - instance := handler.EnqueueRequestsFromMapFunc{ - ToRequests: handler.ToRequestsFunc(func(a handler.MapObject) []reconcile.Request { - defer GinkgoRecover() - Expect(a.Meta).To(Equal(pod.GetObjectMeta())) - Expect(a.Object).To(Equal(pod)) - req = []reconcile.Request{ - { - NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}, - }, - { - NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}, - }, - } - return req - }), - } + instance := handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request { + defer GinkgoRecover() + Expect(a).To(Equal(pod)) + req = []reconcile.Request{ + { + NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}, + }, + { + NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}, + }, + } + return req + }) evt := event.DeleteEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Delete(evt, q) + instance.Delete(ctx, evt, q) Expect(q.Len()).To(Equal(2)) - i, _ := q.Get() - Expect(i).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}})) - - i, _ = q.Get() - 
Expect(i).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}})) + i1, _ := q.Get() + i2, _ := q.Get() + Expect([]interface{}{i1, i2}).To(ConsistOf( + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}}, + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}}, + )) }) It("should enqueue a Request with the function applied to both objects in the UpdateEvent.", - func() { + func(ctx SpecContext) { newPod := pod.DeepCopy() - newPod.Name = pod.Name + "2" - newPod.Namespace = pod.Namespace + "2" req := []reconcile.Request{} - instance := handler.EnqueueRequestsFromMapFunc{ - ToRequests: handler.ToRequestsFunc(func(a handler.MapObject) []reconcile.Request { - defer GinkgoRecover() - req = []reconcile.Request{ - { - NamespacedName: types.NamespacedName{Namespace: "foo", Name: a.Meta.GetName() + "-bar"}, - }, - { - NamespacedName: types.NamespacedName{Namespace: "biz", Name: a.Meta.GetName() + "-baz"}, - }, - } - return req - }), - } + + instance := handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request { + defer GinkgoRecover() + req = []reconcile.Request{ + { + NamespacedName: types.NamespacedName{Namespace: "foo", Name: a.GetName() + "-bar"}, + }, + { + NamespacedName: types.NamespacedName{Namespace: "biz", Name: a.GetName() + "-baz"}, + }, + } + return req + }) evt := event.UpdateEvent{ ObjectOld: pod, - MetaOld: pod.GetObjectMeta(), ObjectNew: newPod, - MetaNew: newPod.GetObjectMeta(), } - instance.Update(evt, q) - Expect(q.Len()).To(Equal(4)) + instance.Update(ctx, evt, q) + Expect(q.Len()).To(Equal(2)) i, _ := q.Get() - Expect(i).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: "foo", Name: "baz-bar"}})) + Expect(i).To(Equal(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "foo", Name: "baz-bar"}})) i, _ = q.Get() - Expect(i).To(Equal(reconcile.Request{ - 
NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz-baz"}})) - - i, _ = q.Get() - Expect(i).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: "foo", Name: "baz2-bar"}})) - - i, _ = q.Get() - Expect(i).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz2-baz"}})) + Expect(i).To(Equal(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz-baz"}})) }) - It("should enqueue a Request with the function applied to the GenericEvent.", func() { + It("should enqueue a Request with the function applied to the GenericEvent.", func(ctx SpecContext) { req := []reconcile.Request{} - instance := handler.EnqueueRequestsFromMapFunc{ - ToRequests: handler.ToRequestsFunc(func(a handler.MapObject) []reconcile.Request { - defer GinkgoRecover() - Expect(a.Meta).To(Equal(pod.GetObjectMeta())) - Expect(a.Object).To(Equal(pod)) - req = []reconcile.Request{ - { - NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}, - }, - { - NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}, - }, - } - return req - }), - } + instance := handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request { + defer GinkgoRecover() + Expect(a).To(Equal(pod)) + req = []reconcile.Request{ + { + NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}, + }, + { + NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}, + }, + } + return req + }) evt := event.GenericEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Generic(evt, q) + instance.Generic(ctx, evt, q) Expect(q.Len()).To(Equal(2)) - i, _ := q.Get() - Expect(i).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}})) - - i, _ = q.Get() - Expect(i).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}})) + i1, _ := q.Get() + i2, _ := q.Get() + 
Expect([]interface{}{i1, i2}).To(ConsistOf( + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}}, + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: "biz", Name: "baz"}}, + )) }) }) Describe("EnqueueRequestForOwner", func() { - It("should enqueue a Request with the Owner of the object in the CreateEvent.", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &appsv1.ReplicaSet{}, - } - Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) + It("should enqueue a Request with the Owner of the object in the CreateEvent.", func(ctx SpecContext) { + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &appsv1.ReplicaSet{}) pod.OwnerReferences = []metav1.OwnerReference{ { @@ -367,9 +302,8 @@ var _ = Describe("Eventhandler", func() { } evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Create(evt, q) + instance.Create(ctx, evt, q) Expect(q.Len()).To(Equal(1)) i, _ := q.Get() @@ -377,12 +311,8 @@ var _ = Describe("Eventhandler", func() { NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo-parent"}})) }) - It("should enqueue a Request with the Owner of the object in the DeleteEvent.", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &appsv1.ReplicaSet{}, - } - Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) + It("should enqueue a Request with the Owner of the object in the DeleteEvent.", func(ctx SpecContext) { + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &appsv1.ReplicaSet{}) pod.OwnerReferences = []metav1.OwnerReference{ { @@ -393,9 +323,8 @@ var _ = Describe("Eventhandler", func() { } evt := event.DeleteEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Delete(evt, q) + instance.Delete(ctx, evt, q) Expect(q.Len()).To(Equal(1)) i, _ := q.Get() @@ -403,16 
+332,12 @@ var _ = Describe("Eventhandler", func() { NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo-parent"}})) }) - It("should enqueue a Request with the Owners of both objects in the UpdateEvent.", func() { + It("should enqueue a Request with the Owners of both objects in the UpdateEvent.", func(ctx SpecContext) { newPod := pod.DeepCopy() newPod.Name = pod.Name + "2" newPod.Namespace = pod.Namespace + "2" - instance := handler.EnqueueRequestForOwner{ - OwnerType: &appsv1.ReplicaSet{}, - } - Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &appsv1.ReplicaSet{}) pod.OwnerReferences = []metav1.OwnerReference{ { @@ -430,29 +355,55 @@ var _ = Describe("Eventhandler", func() { } evt := event.UpdateEvent{ ObjectOld: pod, - MetaOld: pod.GetObjectMeta(), ObjectNew: newPod, - MetaNew: newPod.GetObjectMeta(), } - instance.Update(evt, q) + instance.Update(ctx, evt, q) Expect(q.Len()).To(Equal(2)) - i, _ := q.Get() - Expect(i).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo1-parent"}})) - - i, _ = q.Get() - Expect(i).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: newPod.GetNamespace(), Name: "foo2-parent"}})) + i1, _ := q.Get() + i2, _ := q.Get() + Expect([]interface{}{i1, i2}).To(ConsistOf( + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo1-parent"}}, + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: newPod.GetNamespace(), Name: "foo2-parent"}}, + )) }) - It("should enqueue a Request with the Owner of the object in the GenericEvent.", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &appsv1.ReplicaSet{}, + It("should enqueue a Request with the one duplicate Owner of both objects in the UpdateEvent.", func(ctx SpecContext) { + newPod 
:= pod.DeepCopy() + newPod.Name = pod.Name + "2" + + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &appsv1.ReplicaSet{}) + + pod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, } - Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) + newPod.OwnerReferences = []metav1.OwnerReference{ + { + Name: "foo-parent", + Kind: "ReplicaSet", + APIVersion: "apps/v1", + }, + } + evt := event.UpdateEvent{ + ObjectOld: pod, + ObjectNew: newPod, + } + instance.Update(ctx, evt, q) + Expect(q.Len()).To(Equal(1)) + + i, _ := q.Get() + Expect(i).To(Equal(reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo-parent"}})) + }) + It("should enqueue a Request with the Owner of the object in the GenericEvent.", func(ctx SpecContext) { + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &appsv1.ReplicaSet{}) pod.OwnerReferences = []metav1.OwnerReference{ { Name: "foo-parent", @@ -462,9 +413,8 @@ var _ = Describe("Eventhandler", func() { } evt := event.GenericEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Generic(evt, q) + instance.Generic(ctx, evt, q) Expect(q.Len()).To(Equal(1)) i, _ := q.Get() @@ -472,13 +422,8 @@ var _ = Describe("Eventhandler", func() { NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo-parent"}})) }) - It("should not enqueue a Request if there are no owners matching Group and Kind.", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &appsv1.ReplicaSet{}, - IsController: t, - } - Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) + It("should not enqueue a Request if there are no owners matching Group and Kind.", func(ctx SpecContext) { + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &appsv1.ReplicaSet{}, 
handler.OnlyControllerOwner()) pod.OwnerReferences = []metav1.OwnerReference{ { // Wrong group Name: "foo1-parent", @@ -493,31 +438,25 @@ var _ = Describe("Eventhandler", func() { } evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Create(evt, q) + instance.Create(ctx, evt, q) Expect(q.Len()).To(Equal(0)) }) It("should enqueue a Request if there are owners matching Group "+ - "and Kind with a different version.", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &autoscalingv1.HorizontalPodAutoscaler{}, - } - Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) + "and Kind with a different version.", func(ctx SpecContext) { + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &autoscalingv1.HorizontalPodAutoscaler{}) pod.OwnerReferences = []metav1.OwnerReference{ { Name: "foo-parent", Kind: "HorizontalPodAutoscaler", - APIVersion: "autoscaling/v2beta1", + APIVersion: "autoscaling/v2", }, } evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Create(evt, q) + instance.Create(ctx, evt, q) Expect(q.Len()).To(Equal(1)) i, _ := q.Get() @@ -525,12 +464,8 @@ var _ = Describe("Eventhandler", func() { NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo-parent"}})) }) - It("should enqueue a Request for a owner that is cluster scoped", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &corev1.Node{}, - } - Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) + It("should enqueue a Request for a owner that is cluster scoped", func(ctx SpecContext) { + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &corev1.Node{}) pod.OwnerReferences = []metav1.OwnerReference{ { Name: "node-1", @@ -540,9 +475,8 @@ var _ = Describe("Eventhandler", func() { } evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), 
} - instance.Create(evt, q) + instance.Create(ctx, evt, q) Expect(q.Len()).To(Equal(1)) i, _ := q.Get() @@ -551,29 +485,19 @@ var _ = Describe("Eventhandler", func() { }) - It("should not enqueue a Request if there are no owners.", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &appsv1.ReplicaSet{}, - } - Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) + It("should not enqueue a Request if there are no owners.", func(ctx SpecContext) { + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &appsv1.ReplicaSet{}) evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Create(evt, q) + instance.Create(ctx, evt, q) Expect(q.Len()).To(Equal(0)) }) Context("with the Controller field set to true", func() { It("should enqueue reconcile.Requests for only the first the Controller if there are "+ - "multiple Controller owners.", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &appsv1.ReplicaSet{}, - IsController: t, - } - Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) + "multiple Controller owners.", func(ctx SpecContext) { + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &appsv1.ReplicaSet{}, handler.OnlyControllerOwner()) pod.OwnerReferences = []metav1.OwnerReference{ { Name: "foo1-parent", @@ -584,7 +508,7 @@ var _ = Describe("Eventhandler", func() { Name: "foo2-parent", Kind: "ReplicaSet", APIVersion: "apps/v1", - Controller: &t, + Controller: ptr.To(true), }, { Name: "foo3-parent", @@ -595,7 +519,7 @@ var _ = Describe("Eventhandler", func() { Name: "foo4-parent", Kind: "ReplicaSet", APIVersion: "apps/v1", - Controller: &t, + Controller: ptr.To(true), }, { Name: "foo5-parent", @@ -605,22 +529,16 @@ var _ = Describe("Eventhandler", func() { } evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Create(evt, q) + 
instance.Create(ctx, evt, q) Expect(q.Len()).To(Equal(1)) i, _ := q.Get() Expect(i).To(Equal(reconcile.Request{ NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo2-parent"}})) }) - It("should not enqueue reconcile.Requests if there are no Controller owners.", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &appsv1.ReplicaSet{}, - IsController: t, - } - Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) + It("should not enqueue reconcile.Requests if there are no Controller owners.", func(ctx SpecContext) { + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &appsv1.ReplicaSet{}, handler.OnlyControllerOwner()) pod.OwnerReferences = []metav1.OwnerReference{ { Name: "foo1-parent", @@ -640,35 +558,24 @@ var _ = Describe("Eventhandler", func() { } evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Create(evt, q) + instance.Create(ctx, evt, q) Expect(q.Len()).To(Equal(0)) }) - It("should not enqueue reconcile.Requests if there are no owners.", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &appsv1.ReplicaSet{}, - IsController: t, - } - Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) + It("should not enqueue reconcile.Requests if there are no owners.", func(ctx SpecContext) { + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &appsv1.ReplicaSet{}, handler.OnlyControllerOwner()) evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Create(evt, q) + instance.Create(ctx, evt, q) Expect(q.Len()).To(Equal(0)) }) }) Context("with the Controller field set to false", func() { - It("should enqueue a reconcile.Requests for all owners.", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &appsv1.ReplicaSet{}, - } - Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) - 
Expect(instance.InjectMapper(mapper)).To(Succeed()) + It("should enqueue a reconcile.Requests for all owners.", func(ctx SpecContext) { + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &appsv1.ReplicaSet{}) pod.OwnerReferences = []metav1.OwnerReference{ { Name: "foo1-parent", @@ -688,30 +595,27 @@ var _ = Describe("Eventhandler", func() { } evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Create(evt, q) + instance.Create(ctx, evt, q) Expect(q.Len()).To(Equal(3)) - i, _ := q.Get() - Expect(i).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo1-parent"}})) - i, _ = q.Get() - Expect(i).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo2-parent"}})) - i, _ = q.Get() - Expect(i).To(Equal(reconcile.Request{ - NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo3-parent"}})) + i1, _ := q.Get() + i2, _ := q.Get() + i3, _ := q.Get() + Expect([]interface{}{i1, i2, i3}).To(ConsistOf( + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo1-parent"}}, + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo2-parent"}}, + reconcile.Request{ + NamespacedName: types.NamespacedName{Namespace: pod.GetNamespace(), Name: "foo3-parent"}}, + )) }) }) - Context("with a nil metadata object", func() { - It("should do nothing.", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &appsv1.ReplicaSet{}, - } - Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) + Context("with a nil object", func() { + It("should do nothing.", func(ctx SpecContext) { + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &appsv1.ReplicaSet{}) pod.OwnerReferences = []metav1.OwnerReference{ { Name: "foo1-parent", @@ -720,86 +624,24 @@ var _ = 
Describe("Eventhandler", func() { }, } evt := event.CreateEvent{ - Object: pod, - } - instance.Create(evt, q) - Expect(q.Len()).To(Equal(0)) - }) - }) - - Context("with a multiple matching kinds", func() { - It("should do nothing.", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &metav1.ListOptions{}, - } - Expect(instance.InjectScheme(scheme.Scheme)).NotTo(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) - pod.OwnerReferences = []metav1.OwnerReference{ - { - Name: "foo1-parent", - Kind: "ListOptions", - APIVersion: "meta/v1", - }, - } - evt := event.CreateEvent{ - Object: pod, - Meta: pod.GetObjectMeta(), - } - instance.Create(evt, q) - Expect(q.Len()).To(Equal(0)) - }) - }) - Context("with an OwnerType that cannot be resolved", func() { - It("should do nothing.", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &controllertest.ErrorType{}, - } - Expect(instance.InjectScheme(scheme.Scheme)).NotTo(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) - pod.OwnerReferences = []metav1.OwnerReference{ - { - Name: "foo1-parent", - Kind: "ListOptions", - APIVersion: "meta/v1", - }, + Object: nil, } - evt := event.CreateEvent{ - Object: pod, - Meta: pod.GetObjectMeta(), - } - instance.Create(evt, q) + instance.Create(ctx, evt, q) Expect(q.Len()).To(Equal(0)) }) }) Context("with a nil OwnerType", func() { - It("should do nothing.", func() { - instance := handler.EnqueueRequestForOwner{} - Expect(instance.InjectScheme(scheme.Scheme)).NotTo(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) - pod.OwnerReferences = []metav1.OwnerReference{ - { - Name: "foo1-parent", - Kind: "OwnerType", - APIVersion: "meta/v1", - }, - } - evt := event.CreateEvent{ - Object: pod, - Meta: pod.GetObjectMeta(), - } - instance.Create(evt, q) - Expect(q.Len()).To(Equal(0)) + It("should panic", func() { + Expect(func() { + handler.EnqueueRequestForOwner(nil, nil, nil) + }).To(Panic()) }) }) Context("with an invalid 
APIVersion in the OwnerReference", func() { - It("should do nothing.", func() { - instance := handler.EnqueueRequestForOwner{ - OwnerType: &appsv1.ReplicaSet{}, - } - Expect(instance.InjectScheme(scheme.Scheme)).To(Succeed()) - Expect(instance.InjectMapper(mapper)).To(Succeed()) + It("should do nothing.", func(ctx SpecContext) { + instance := handler.EnqueueRequestForOwner(scheme.Scheme, mapper, &appsv1.ReplicaSet{}) pod.OwnerReferences = []metav1.OwnerReference{ { Name: "foo1-parent", @@ -809,146 +651,518 @@ var _ = Describe("Eventhandler", func() { } evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Create(evt, q) + instance.Create(ctx, evt, q) Expect(q.Len()).To(Equal(0)) }) }) }) Describe("Funcs", func() { - failingFuncs := handler.Funcs{ - CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { + failingFuncs := handler.TypedFuncs[client.Object, reconcile.Request]{ + CreateFunc: func(context.Context, event.CreateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Did not expect CreateEvent to be called.") }, - DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { + DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Did not expect DeleteEvent to be called.") }, - UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Did not expect UpdateEvent to be called.") }, - GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + GenericFunc: func(context.Context, event.GenericEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Did not expect GenericEvent to be called.") }, } - It("should call CreateFunc for a CreateEvent if provided.", func(done Done) { + It("should 
call CreateFunc for a CreateEvent if provided.", func(ctx SpecContext) { instance := failingFuncs evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.CreateFunc = func(evt2 event.CreateEvent, q2 workqueue.RateLimitingInterface) { + instance.CreateFunc = func(ctx context.Context, evt2 event.CreateEvent, q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Expect(q2).To(Equal(q)) Expect(evt2).To(Equal(evt)) } - instance.Create(evt, q) - close(done) + instance.Create(ctx, evt, q) }) - It("should NOT call CreateFunc for a CreateEvent if NOT provided.", func(done Done) { + It("should NOT call CreateFunc for a CreateEvent if NOT provided.", func(ctx SpecContext) { instance := failingFuncs instance.CreateFunc = nil evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Create(evt, q) - close(done) + instance.Create(ctx, evt, q) }) - It("should call UpdateFunc for an UpdateEvent if provided.", func(done Done) { + It("should call UpdateFunc for an UpdateEvent if provided.", func(ctx SpecContext) { newPod := pod.DeepCopy() newPod.Name = pod.Name + "2" newPod.Namespace = pod.Namespace + "2" evt := event.UpdateEvent{ ObjectOld: pod, - MetaOld: pod.GetObjectMeta(), ObjectNew: newPod, - MetaNew: newPod.GetObjectMeta(), } instance := failingFuncs - instance.UpdateFunc = func(evt2 event.UpdateEvent, q2 workqueue.RateLimitingInterface) { + instance.UpdateFunc = func(ctx context.Context, evt2 event.UpdateEvent, q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Expect(q2).To(Equal(q)) Expect(evt2).To(Equal(evt)) } - instance.Update(evt, q) - close(done) + instance.Update(ctx, evt, q) }) - It("should NOT call UpdateFunc for an UpdateEvent if NOT provided.", func(done Done) { + It("should NOT call UpdateFunc for an UpdateEvent if NOT provided.", func(ctx SpecContext) { newPod := pod.DeepCopy() newPod.Name = pod.Name + "2" newPod.Namespace = pod.Namespace + "2" evt := 
event.UpdateEvent{ ObjectOld: pod, - MetaOld: pod.GetObjectMeta(), ObjectNew: newPod, - MetaNew: newPod.GetObjectMeta(), } - instance.Update(evt, q) - close(done) + instance.Update(ctx, evt, q) }) - It("should call DeleteFunc for a DeleteEvent if provided.", func(done Done) { + It("should call DeleteFunc for a DeleteEvent if provided.", func(ctx SpecContext) { instance := failingFuncs evt := event.DeleteEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.DeleteFunc = func(evt2 event.DeleteEvent, q2 workqueue.RateLimitingInterface) { + instance.DeleteFunc = func(ctx context.Context, evt2 event.DeleteEvent, q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Expect(q2).To(Equal(q)) Expect(evt2).To(Equal(evt)) } - instance.Delete(evt, q) - close(done) + instance.Delete(ctx, evt, q) }) - It("should NOT call DeleteFunc for a DeleteEvent if NOT provided.", func(done Done) { + It("should NOT call DeleteFunc for a DeleteEvent if NOT provided.", func(ctx SpecContext) { instance := failingFuncs instance.DeleteFunc = nil evt := event.DeleteEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Delete(evt, q) - close(done) + instance.Delete(ctx, evt, q) }) - It("should call GenericFunc for a GenericEvent if provided.", func(done Done) { + It("should call GenericFunc for a GenericEvent if provided.", func(ctx SpecContext) { instance := failingFuncs evt := event.GenericEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.GenericFunc = func(evt2 event.GenericEvent, q2 workqueue.RateLimitingInterface) { + instance.GenericFunc = func(ctx context.Context, evt2 event.GenericEvent, q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Expect(q2).To(Equal(q)) Expect(evt2).To(Equal(evt)) } - instance.Generic(evt, q) - close(done) + instance.Generic(ctx, evt, q) }) - It("should NOT call GenericFunc for a GenericEvent if NOT provided.", func(done Done) { + It("should NOT call GenericFunc for a 
GenericEvent if NOT provided.", func(ctx SpecContext) { instance := failingFuncs instance.GenericFunc = nil evt := event.GenericEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } - instance.Generic(evt, q) - close(done) + instance.Generic(ctx, evt, q) }) }) + + Describe("WithLowPriorityWhenUnchanged", func() { + handlerPriorityTests := []struct { + name string + handler func() handler.EventHandler + after time.Duration + ratelimited bool + overridePriority int + }{ + { + name: "WithLowPriorityWhenUnchanged wrapper", + handler: func() handler.EventHandler { return handler.WithLowPriorityWhenUnchanged(customHandler{}) }, + }, + { + name: "EnqueueRequestForObject", + handler: func() handler.EventHandler { return &handler.EnqueueRequestForObject{} }, + }, + { + name: "EnqueueRequestForOwner", + handler: func() handler.EventHandler { + return handler.EnqueueRequestForOwner( + scheme.Scheme, + mapper, + &corev1.Pod{}, + ) + }, + }, + { + name: "TypedEnqueueRequestForOwner", + handler: func() handler.EventHandler { + return handler.TypedEnqueueRequestForOwner[client.Object]( + scheme.Scheme, + mapper, + &corev1.Pod{}, + ) + }, + }, + { + name: "Funcs", + handler: func() handler.EventHandler { + return handler.TypedFuncs[client.Object, reconcile.Request]{ + CreateFunc: func(ctx context.Context, tce event.TypedCreateEvent[client.Object], wq workqueue.TypedRateLimitingInterface[reconcile.Request]) { + wq.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: tce.Object.GetNamespace(), + Name: tce.Object.GetName(), + }}) + }, + UpdateFunc: func(ctx context.Context, tue event.TypedUpdateEvent[client.Object], wq workqueue.TypedRateLimitingInterface[reconcile.Request]) { + wq.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: tue.ObjectNew.GetNamespace(), + Name: tue.ObjectNew.GetName(), + }}) + }, + } + }, + }, + { + name: "EnqueueRequestsFromMapFunc", + handler: func() handler.EventHandler { + return 
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + return []reconcile.Request{{NamespacedName: types.NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + }}} + }) + }, + }, + { + name: "WithLowPriorityWhenUnchanged - Add", + handler: func() handler.EventHandler { + return handler.WithLowPriorityWhenUnchanged( + handler.TypedFuncs[client.Object, reconcile.Request]{ + CreateFunc: func(ctx context.Context, tce event.TypedCreateEvent[client.Object], wq workqueue.TypedRateLimitingInterface[reconcile.Request]) { + wq.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: tce.Object.GetNamespace(), + Name: tce.Object.GetName(), + }}) + }, + UpdateFunc: func(ctx context.Context, tue event.TypedUpdateEvent[client.Object], wq workqueue.TypedRateLimitingInterface[reconcile.Request]) { + wq.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: tue.ObjectNew.GetNamespace(), + Name: tue.ObjectNew.GetName(), + }}) + }, + }) + }, + }, + { + name: "WithLowPriorityWhenUnchanged - AddAfter", + handler: func() handler.EventHandler { + return handler.WithLowPriorityWhenUnchanged( + handler.TypedFuncs[client.Object, reconcile.Request]{ + CreateFunc: func(ctx context.Context, tce event.TypedCreateEvent[client.Object], wq workqueue.TypedRateLimitingInterface[reconcile.Request]) { + wq.AddAfter(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: tce.Object.GetNamespace(), + Name: tce.Object.GetName(), + }}, time.Second) + }, + UpdateFunc: func(ctx context.Context, tue event.TypedUpdateEvent[client.Object], wq workqueue.TypedRateLimitingInterface[reconcile.Request]) { + wq.AddAfter(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: tue.ObjectNew.GetNamespace(), + Name: tue.ObjectNew.GetName(), + }}, time.Second) + }, + }) + }, + after: time.Second, + }, + { + name: "WithLowPriorityWhenUnchanged - AddRateLimited", + handler: func() 
handler.EventHandler { + return handler.WithLowPriorityWhenUnchanged( + handler.TypedFuncs[client.Object, reconcile.Request]{ + CreateFunc: func(ctx context.Context, tce event.TypedCreateEvent[client.Object], wq workqueue.TypedRateLimitingInterface[reconcile.Request]) { + wq.AddRateLimited(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: tce.Object.GetNamespace(), + Name: tce.Object.GetName(), + }}) + }, + UpdateFunc: func(ctx context.Context, tue event.TypedUpdateEvent[client.Object], wq workqueue.TypedRateLimitingInterface[reconcile.Request]) { + wq.AddRateLimited(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: tue.ObjectNew.GetNamespace(), + Name: tue.ObjectNew.GetName(), + }}) + }, + }) + }, + ratelimited: true, + }, + { + name: "WithLowPriorityWhenUnchanged - AddWithOpts priority is retained", + handler: func() handler.EventHandler { + return handler.WithLowPriorityWhenUnchanged( + handler.TypedFuncs[client.Object, reconcile.Request]{ + CreateFunc: func(ctx context.Context, tce event.TypedCreateEvent[client.Object], wq workqueue.TypedRateLimitingInterface[reconcile.Request]) { + if pq, isPQ := wq.(priorityqueue.PriorityQueue[reconcile.Request]); isPQ { + pq.AddWithOpts(priorityqueue.AddOpts{Priority: ptr.To(100)}, reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: tce.Object.GetNamespace(), + Name: tce.Object.GetName(), + }}) + return + } + wq.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: tce.Object.GetNamespace(), + Name: tce.Object.GetName(), + }}) + }, + UpdateFunc: func(ctx context.Context, tue event.TypedUpdateEvent[client.Object], wq workqueue.TypedRateLimitingInterface[reconcile.Request]) { + if pq, isPQ := wq.(priorityqueue.PriorityQueue[reconcile.Request]); isPQ { + pq.AddWithOpts(priorityqueue.AddOpts{Priority: ptr.To(100)}, reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: tue.ObjectNew.GetNamespace(), + Name: tue.ObjectNew.GetName(), + }}) + 
return + } + wq.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: tue.ObjectNew.GetNamespace(), + Name: tue.ObjectNew.GetName(), + }}) + }, + }) + }, + overridePriority: 100, + }, + } + for _, test := range handlerPriorityTests { + When("handler is "+test.name, func() { + It("should lower the priority of a create request for an object that was part of the initial list", func(ctx SpecContext) { + actualOpts := priorityqueue.AddOpts{} + var actualRequests []reconcile.Request + wq := &fakePriorityQueue{ + addWithOpts: func(o priorityqueue.AddOpts, items ...reconcile.Request) { + actualOpts = o + actualRequests = items + }, + } + + test.handler().Create(ctx, event.CreateEvent{ + Object: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + Name: "my-pod", + CreationTimestamp: metav1.Now(), + OwnerReferences: []metav1.OwnerReference{{ + Kind: "Pod", + Name: "my-pod", + }}, + }}, + IsInInitialList: true, + }, wq) + + expected := handler.LowPriority + if test.overridePriority != 0 { + expected = test.overridePriority + } + + Expect(actualOpts).To(Equal(priorityqueue.AddOpts{ + Priority: ptr.To(expected), + After: test.after, + RateLimited: test.ratelimited, + })) + Expect(actualRequests).To(Equal([]reconcile.Request{{NamespacedName: types.NamespacedName{Name: "my-pod"}}})) + }) + + It("should not lower the priority of a create request for an object that was not part of the initial list", func(ctx SpecContext) { + actualOpts := priorityqueue.AddOpts{} + var actualRequests []reconcile.Request + wq := &fakePriorityQueue{ + addWithOpts: func(o priorityqueue.AddOpts, items ...reconcile.Request) { + actualOpts = o + actualRequests = items + }, + } + + test.handler().Create(ctx, event.CreateEvent{ + Object: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + Name: "my-pod", + CreationTimestamp: metav1.Now(), + OwnerReferences: []metav1.OwnerReference{{ + Kind: "Pod", + Name: "my-pod", + }}, + }}, + IsInInitialList: false, + }, wq) + + var expectedPriority *int + if 
test.overridePriority != 0 { + expectedPriority = &test.overridePriority + } + + Expect(actualOpts).To(Equal(priorityqueue.AddOpts{After: test.after, RateLimited: test.ratelimited, Priority: expectedPriority})) + Expect(actualRequests).To(Equal([]reconcile.Request{{NamespacedName: types.NamespacedName{Name: "my-pod"}}})) + }) + + It("should lower the priority of an update request with unchanged RV", func(ctx SpecContext) { + actualOpts := priorityqueue.AddOpts{} + var actualRequests []reconcile.Request + wq := &fakePriorityQueue{ + addWithOpts: func(o priorityqueue.AddOpts, items ...reconcile.Request) { + actualOpts = o + actualRequests = items + }, + } + + test.handler().Update(ctx, event.UpdateEvent{ + ObjectOld: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + Name: "my-pod", + OwnerReferences: []metav1.OwnerReference{{ + Kind: "Pod", + Name: "my-pod", + }}, + }}, + ObjectNew: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + Name: "my-pod", + OwnerReferences: []metav1.OwnerReference{{ + Kind: "Pod", + Name: "my-pod", + }}, + }}, + }, wq) + + expectedPriority := handler.LowPriority + if test.overridePriority != 0 { + expectedPriority = test.overridePriority + } + + Expect(actualOpts).To(Equal(priorityqueue.AddOpts{After: test.after, RateLimited: test.ratelimited, Priority: ptr.To(expectedPriority)})) + Expect(actualRequests).To(Equal([]reconcile.Request{{NamespacedName: types.NamespacedName{Name: "my-pod"}}})) + }) + + It("should not lower the priority of an update request with changed RV", func(ctx SpecContext) { + actualOpts := priorityqueue.AddOpts{} + var actualRequests []reconcile.Request + wq := &fakePriorityQueue{ + addWithOpts: func(o priorityqueue.AddOpts, items ...reconcile.Request) { + actualOpts = o + actualRequests = items + }, + } + + test.handler().Update(ctx, event.UpdateEvent{ + ObjectOld: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + Name: "my-pod", + OwnerReferences: []metav1.OwnerReference{{ + Kind: "Pod", + Name: "my-pod", + }}, + }}, + ObjectNew: 
&corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + Name: "my-pod", + ResourceVersion: "1", + OwnerReferences: []metav1.OwnerReference{{ + Kind: "Pod", + Name: "my-pod", + }}, + }}, + }, wq) + + var expectedPriority *int + if test.overridePriority != 0 { + expectedPriority = &test.overridePriority + } + Expect(actualOpts).To(Equal(priorityqueue.AddOpts{After: test.after, RateLimited: test.ratelimited, Priority: expectedPriority})) + Expect(actualRequests).To(Equal([]reconcile.Request{{NamespacedName: types.NamespacedName{Name: "my-pod"}}})) + }) + + It("should have no effect on create if the workqueue is not a priorityqueue", func(ctx SpecContext) { + test.handler().Create(ctx, event.CreateEvent{ + Object: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + Name: "my-pod", + OwnerReferences: []metav1.OwnerReference{{ + Kind: "Pod", + Name: "my-pod", + }}, + }}, + }, q) + + Expect(q.Len()).To(Equal(1)) + item, _ := q.Get() + Expect(item).To(Equal(reconcile.Request{NamespacedName: types.NamespacedName{Name: "my-pod"}})) + }) + + It("should have no effect on Update if the workqueue is not a priorityqueue", func(ctx SpecContext) { + test.handler().Update(ctx, event.UpdateEvent{ + ObjectOld: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + Name: "my-pod", + OwnerReferences: []metav1.OwnerReference{{ + Kind: "Pod", + Name: "my-pod", + }}, + }}, + ObjectNew: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{ + Name: "my-pod", + OwnerReferences: []metav1.OwnerReference{{ + Kind: "Pod", + Name: "my-pod", + }}, + }}, + }, q) + + Expect(q.Len()).To(Equal(1)) + item, _ := q.Get() + Expect(item).To(Equal(reconcile.Request{NamespacedName: types.NamespacedName{Name: "my-pod"}})) + }) + }) + } + }) }) + +type fakePriorityQueue struct { + workqueue.TypedRateLimitingInterface[reconcile.Request] + addWithOpts func(o priorityqueue.AddOpts, items ...reconcile.Request) +} + +func (f *fakePriorityQueue) Add(item reconcile.Request) { + f.AddWithOpts(priorityqueue.AddOpts{}, item) +} + +func (f *fakePriorityQueue) 
AddWithOpts(o priorityqueue.AddOpts, items ...reconcile.Request) { + f.addWithOpts(o, items...) +} +func (f *fakePriorityQueue) GetWithPriority() (item reconcile.Request, priority int, shutdown bool) { + panic("GetWithPriority is not expected to be called") +} + +// customHandler re-implements the basic enqueueRequestForObject logic +// to be able to test the WithLowPriorityWhenUnchanged wrapper +type customHandler struct{} + +func (ch customHandler) Create(ctx context.Context, evt event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: evt.Object.GetNamespace(), + Name: evt.Object.GetName(), + }}) +} +func (ch customHandler) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: evt.ObjectNew.GetNamespace(), + Name: evt.ObjectNew.GetName(), + }}) +} +func (ch customHandler) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: evt.Object.GetNamespace(), + Name: evt.Object.GetName(), + }}) +} +func (ch customHandler) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Namespace: evt.Object.GetNamespace(), + Name: evt.Object.GetName(), + }}) +} diff --git a/pkg/handler/example_test.go b/pkg/handler/example_test.go index 7ead7632cb..ad87e4be63 100644 --- a/pkg/handler/example_test.go +++ b/pkg/handler/example_test.go @@ -17,6 +17,8 @@ limitations under the License. 
package handler_test import ( + "context" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -24,19 +26,22 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) -var c controller.Controller +var ( + mgr manager.Manager + c controller.Controller +) // This example watches Pods and enqueues Requests with the Name and Namespace of the Pod from // the Event (i.e. change caused by a Create, Update, Delete). func ExampleEnqueueRequestForObject() { // controller is a controller.controller err := c.Watch( - &source.Kind{Type: &corev1.Pod{}}, - &handler.EnqueueRequestForObject{}, + source.Kind(mgr.GetCache(), &corev1.Pod{}, &handler.TypedEnqueueRequestForObject[*corev1.Pod]{}), ) if err != nil { // handle it @@ -48,11 +53,10 @@ func ExampleEnqueueRequestForObject() { func ExampleEnqueueRequestForOwner() { // controller is a controller.controller err := c.Watch( - &source.Kind{Type: &appsv1.ReplicaSet{}}, - &handler.EnqueueRequestForOwner{ - OwnerType: &appsv1.Deployment{}, - IsController: true, - }, + source.Kind(mgr.GetCache(), + &appsv1.ReplicaSet{}, + handler.TypedEnqueueRequestForOwner[*appsv1.ReplicaSet](mgr.GetScheme(), mgr.GetRESTMapper(), &appsv1.Deployment{}, handler.OnlyControllerOwner()), + ), ) if err != nil { // handle it @@ -64,21 +68,21 @@ func ExampleEnqueueRequestForOwner() { func ExampleEnqueueRequestsFromMapFunc() { // controller is a controller.controller err := c.Watch( - &source.Kind{Type: &appsv1.Deployment{}}, - &handler.EnqueueRequestsFromMapFunc{ - ToRequests: handler.ToRequestsFunc(func(a handler.MapObject) []reconcile.Request { + source.Kind(mgr.GetCache(), &appsv1.Deployment{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, a *appsv1.Deployment) 
[]reconcile.Request { return []reconcile.Request{ {NamespacedName: types.NamespacedName{ - Name: a.Meta.GetName() + "-1", - Namespace: a.Meta.GetNamespace(), + Name: a.Name + "-1", + Namespace: a.Namespace, }}, {NamespacedName: types.NamespacedName{ - Name: a.Meta.GetName() + "-2", - Namespace: a.Meta.GetNamespace(), + Name: a.Name + "-2", + Namespace: a.Namespace, }}, } }), - }) + ), + ) if err != nil { // handle it } @@ -88,33 +92,34 @@ func ExampleEnqueueRequestsFromMapFunc() { func ExampleFuncs() { // controller is a controller.controller err := c.Watch( - &source.Kind{Type: &corev1.Pod{}}, - handler.Funcs{ - CreateFunc: func(e event.CreateEvent, q workqueue.RateLimitingInterface) { - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: e.Meta.GetName(), - Namespace: e.Meta.GetNamespace(), - }}) - }, - UpdateFunc: func(e event.UpdateEvent, q workqueue.RateLimitingInterface) { - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: e.MetaNew.GetName(), - Namespace: e.MetaNew.GetNamespace(), - }}) - }, - DeleteFunc: func(e event.DeleteEvent, q workqueue.RateLimitingInterface) { - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: e.Meta.GetName(), - Namespace: e.Meta.GetNamespace(), - }}) - }, - GenericFunc: func(e event.GenericEvent, q workqueue.RateLimitingInterface) { - q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ - Name: e.Meta.GetName(), - Namespace: e.Meta.GetNamespace(), - }}) + source.Kind(mgr.GetCache(), &corev1.Pod{}, + handler.TypedFuncs[*corev1.Pod, reconcile.Request]{ + CreateFunc: func(ctx context.Context, e event.TypedCreateEvent[*corev1.Pod], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: e.Object.Name, + Namespace: e.Object.Namespace, + }}) + }, + UpdateFunc: func(ctx context.Context, e event.TypedUpdateEvent[*corev1.Pod], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + 
q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: e.ObjectNew.Name, + Namespace: e.ObjectNew.Namespace, + }}) + }, + DeleteFunc: func(ctx context.Context, e event.TypedDeleteEvent[*corev1.Pod], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: e.Object.Name, + Namespace: e.Object.Namespace, + }}) + }, + GenericFunc: func(ctx context.Context, e event.TypedGenericEvent[*corev1.Pod], q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: e.Object.Name, + Namespace: e.Object.Namespace, + }}) + }, }, - }, + ), ) if err != nil { // handle it diff --git a/pkg/healthz/doc.go b/pkg/healthz/doc.go new file mode 100644 index 0000000000..9827eeafed --- /dev/null +++ b/pkg/healthz/doc.go @@ -0,0 +1,32 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package healthz contains helpers from supporting liveness and readiness endpoints. +// (often referred to as healthz and readyz, respectively). +// +// This package draws heavily from the apiserver's healthz package +// ( https://github.com/kubernetes/apiserver/tree/master/pkg/server/healthz ) +// but has some changes to bring it in line with controller-runtime's style. +// +// The main entrypoint is the Handler -- this serves both aggregated health status +// and individual health check endpoints. 
+package healthz + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("healthz") diff --git a/pkg/healthz/healthz.go b/pkg/healthz/healthz.go new file mode 100644 index 0000000000..cfb5dc8d02 --- /dev/null +++ b/pkg/healthz/healthz.go @@ -0,0 +1,206 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthz + +import ( + "fmt" + "net/http" + "path" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// Handler is an http.Handler that aggregates the results of the given +// checkers to the root path, and supports calling individual checkers on +// subpaths of the name of the checker. +// +// Adding checks on the fly is *not* threadsafe -- use a wrapper. +type Handler struct { + Checks map[string]Checker +} + +// checkStatus holds the output of a particular check. +type checkStatus struct { + name string + healthy bool + excluded bool +} + +func (h *Handler) serveAggregated(resp http.ResponseWriter, req *http.Request) { + failed := false + excluded := getExcludedChecks(req) + + parts := make([]checkStatus, 0, len(h.Checks)) + + // calculate the results... 
+ for checkName, check := range h.Checks { + // no-op the check if we've specified we want to exclude the check + if excluded.Has(checkName) { + excluded.Delete(checkName) + parts = append(parts, checkStatus{name: checkName, healthy: true, excluded: true}) + continue + } + if err := check(req); err != nil { + log.V(1).Info("healthz check failed", "checker", checkName, "error", err) + parts = append(parts, checkStatus{name: checkName, healthy: false}) + failed = true + } else { + parts = append(parts, checkStatus{name: checkName, healthy: true}) + } + } + + // ...default a check if none is present... + if len(h.Checks) == 0 { + parts = append(parts, checkStatus{name: "ping", healthy: true}) + } + + for _, c := range excluded.UnsortedList() { + log.V(1).Info("cannot exclude health check, no matches for it", "checker", c) + } + + // ...sort to be consistent... + sort.Slice(parts, func(i, j int) bool { return parts[i].name < parts[j].name }) + + // ...and write out the result + // TODO(directxman12): this should also accept a request for JSON content (via a accept header) + _, forceVerbose := req.URL.Query()["verbose"] + writeStatusesAsText(resp, parts, excluded, failed, forceVerbose) +} + +// writeStatusAsText writes out the given check statuses in some semi-arbitrary +// bespoke text format that we copied from Kubernetes. unknownExcludes lists +// any checks that the user requested to have excluded, but weren't actually +// known checks. writeStatusAsText is always verbose on failure, and can be +// forced to be verbose on success using the given argument. 
+func writeStatusesAsText(resp http.ResponseWriter, parts []checkStatus, unknownExcludes sets.Set[string], failed, forceVerbose bool) { + resp.Header().Set("Content-Type", "text/plain; charset=utf-8") + resp.Header().Set("X-Content-Type-Options", "nosniff") + + // always write status code first + if failed { + resp.WriteHeader(http.StatusInternalServerError) + } else { + resp.WriteHeader(http.StatusOK) + } + + // shortcut for easy non-verbose success + if !failed && !forceVerbose { + fmt.Fprint(resp, "ok") + return + } + + // we're always verbose on failure, so from this point on we're guaranteed to be verbose + + for _, checkOut := range parts { + switch { + case checkOut.excluded: + fmt.Fprintf(resp, "[+]%s excluded: ok\n", checkOut.name) + case checkOut.healthy: + fmt.Fprintf(resp, "[+]%s ok\n", checkOut.name) + default: + // don't include the error since this endpoint is public. If someone wants more detail + // they should have explicit permission to the detailed checks. + fmt.Fprintf(resp, "[-]%s failed: reason withheld\n", checkOut.name) + } + } + + if unknownExcludes.Len() > 0 { + fmt.Fprintf(resp, "warn: some health checks cannot be excluded: no matches for %s\n", formatQuoted(unknownExcludes.UnsortedList()...)) + } + + if failed { + log.Info("healthz check failed", "statuses", parts) + fmt.Fprintf(resp, "healthz check failed\n") + } else { + fmt.Fprint(resp, "healthz check passed\n") + } +} + +func (h *Handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + // clean up the request (duplicating the internal logic of http.ServeMux a bit) + // clean up the path a bit + reqPath := req.URL.Path + if reqPath == "" || reqPath[0] != '/' { + reqPath = "/" + reqPath + } + // path.Clean removes the trailing slash except for root for us + // (which is fine, since we're only serving one layer of sub-paths) + reqPath = path.Clean(reqPath) + + // either serve the root endpoint... 
+ if reqPath == "/" { + h.serveAggregated(resp, req) + return + } + + // ...the default check (if nothing else is present)... + if len(h.Checks) == 0 && reqPath[1:] == "ping" { + CheckHandler{Checker: Ping}.ServeHTTP(resp, req) + return + } + + // ...or an individual checker + checkName := reqPath[1:] // ignore the leading slash + checker, known := h.Checks[checkName] + if !known { + http.NotFoundHandler().ServeHTTP(resp, req) + return + } + + CheckHandler{Checker: checker}.ServeHTTP(resp, req) +} + +// CheckHandler is an http.Handler that serves a health check endpoint at the root path, +// based on its checker. +type CheckHandler struct { + Checker +} + +func (h CheckHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + if err := h.Checker(req); err != nil { + http.Error(resp, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError) + } else { + fmt.Fprint(resp, "ok") + } +} + +// Checker knows how to perform a health check. +type Checker func(req *http.Request) error + +// Ping returns true automatically when checked. +var Ping Checker = func(_ *http.Request) error { return nil } + +// getExcludedChecks extracts the health check names to be excluded from the query param. +func getExcludedChecks(r *http.Request) sets.Set[string] { + checks, found := r.URL.Query()["exclude"] + if found { + return sets.New[string](checks...) + } + return sets.New[string]() +} + +// formatQuoted returns a formatted string of the health check names, +// preserving the order passed in. 
+func formatQuoted(names ...string) string { + quoted := make([]string, 0, len(names)) + for _, name := range names { + quoted = append(quoted, fmt.Sprintf("%q", name)) + } + return strings.Join(quoted, ",") +} diff --git a/pkg/manager/testutil.go b/pkg/healthz/healthz_suite_test.go similarity index 59% rename from pkg/manager/testutil.go rename to pkg/healthz/healthz_suite_test.go index a09dc192f6..8e16a58aa0 100644 --- a/pkg/manager/testutil.go +++ b/pkg/healthz/healthz_suite_test.go @@ -14,12 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -package manager +package healthz_test -// func SetCacheForTest(options *Options, c func(config *rest.Config, opts cache.Options) (cache.Cache, error)) { -// options.newCache = c -// } +import ( + "testing" -// func SetClientForTest(options *Options, c func(config *rest.Config, options client.Options) (client.Client, error)) { -// options.newClient = c -// } + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestHealthz(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Healthz Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}) diff --git a/pkg/healthz/healthz_test.go b/pkg/healthz/healthz_test.go new file mode 100644 index 0000000000..639a7575f3 --- /dev/null +++ b/pkg/healthz/healthz_test.go @@ -0,0 +1,203 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthz_test + +import ( + "errors" + "net/http" + "net/http/httptest" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/healthz" +) + +const ( + contentType = "text/plain; charset=utf-8" +) + +func requestTo(handler http.Handler, dest string) *httptest.ResponseRecorder { + req, err := http.NewRequest("GET", dest, nil) + Expect(err).NotTo(HaveOccurred()) + resp := httptest.NewRecorder() + handler.ServeHTTP(resp, req) + + return resp +} + +var _ = Describe("Healthz Handler", func() { + Describe("the aggregated endpoint", func() { + It("should return healthy if all checks succeed", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "ok2": healthz.Ping, + }} + + resp := requestTo(handler, "/") + Expect(resp.Code).To(Equal(http.StatusOK)) + }) + + It("should return unhealthy if at least one check fails", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "bad1": func(req *http.Request) error { + return errors.New("blech") + }, + }} + + resp := requestTo(handler, "/") + Expect(resp.Code).To(Equal(http.StatusInternalServerError)) + }) + + It("should ingore excluded checks when determining health", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "bad1": func(req *http.Request) error { + return errors.New("blech") + }, + }} + + resp := requestTo(handler, "/?exclude=bad1") + Expect(resp.Code).To(Equal(http.StatusOK)) + }) + + It("should be 
fine if asked to exclude a check that doesn't exist", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "ok2": healthz.Ping, + }} + + resp := requestTo(handler, "/?exclude=nonexistant") + Expect(resp.Code).To(Equal(http.StatusOK)) + }) + + Context("when verbose output is requested with ?verbose=true", func() { + It("should return verbose output for ok cases", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "ok2": healthz.Ping, + }} + + resp := requestTo(handler, "/?verbose=true") + Expect(resp.Code).To(Equal(http.StatusOK)) + Expect(resp.Header().Get("Content-Type")).To(Equal(contentType)) + Expect(resp.Body.String()).To(Equal("[+]ok1 ok\n[+]ok2 ok\nhealthz check passed\n")) + }) + + It("should return verbose output for failures", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "bad1": func(req *http.Request) error { + return errors.New("blech") + }, + }} + + resp := requestTo(handler, "/?verbose=true") + Expect(resp.Code).To(Equal(http.StatusInternalServerError)) + Expect(resp.Header().Get("Content-Type")).To(Equal(contentType)) + Expect(resp.Body.String()).To(Equal("[-]bad1 failed: reason withheld\n[+]ok1 ok\nhealthz check failed\n")) + }) + }) + + It("should return non-verbose output when healthy and not specified as verbose", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "ok2": healthz.Ping, + }} + + resp := requestTo(handler, "/") + Expect(resp.Header().Get("Content-Type")).To(Equal(contentType)) + Expect(resp.Body.String()).To(Equal("ok")) + + }) + + It("should always be verbose if a check fails", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "ok1": healthz.Ping, + "bad1": func(req *http.Request) error { + return errors.New("blech") + }, + }} + + resp := requestTo(handler, "/") + 
Expect(resp.Header().Get("Content-Type")).To(Equal(contentType)) + Expect(resp.Body.String()).To(Equal("[-]bad1 failed: reason withheld\n[+]ok1 ok\nhealthz check failed\n")) + }) + + It("should always return a ping endpoint if no other ones are present", func() { + resp := requestTo(&healthz.Handler{}, "/?verbose=true") + Expect(resp.Code).To(Equal(http.StatusOK)) + Expect(resp.Header().Get("Content-Type")).To(Equal(contentType)) + Expect(resp.Body.String()).To(Equal("[+]ping ok\nhealthz check passed\n")) + }) + }) + + Describe("the per-check endpoints", func() { + It("should return ok if the requested check is healthy", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "okcheck": healthz.Ping, + }} + + resp := requestTo(handler, "/okcheck") + Expect(resp.Code).To(Equal(http.StatusOK)) + Expect(resp.Header().Get("Content-Type")).To(Equal(contentType)) + Expect(resp.Body.String()).To(Equal("ok")) + }) + + It("should return an error if the requested check is unhealthy", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "failcheck": func(req *http.Request) error { + return errors.New("blech") + }, + }} + + resp := requestTo(handler, "/failcheck") + Expect(resp.Code).To(Equal(http.StatusInternalServerError)) + Expect(resp.Header().Get("Content-Type")).To(Equal(contentType)) + Expect(resp.Body.String()).To(Equal("internal server error: blech\n")) + }) + + It("shouldn't take other checks into account", func() { + handler := &healthz.Handler{Checks: map[string]healthz.Checker{ + "failcheck": func(req *http.Request) error { + return errors.New("blech") + }, + "okcheck": healthz.Ping, + }} + + By("checking the bad endpoint and expecting it to fail") + resp := requestTo(handler, "/failcheck") + Expect(resp.Code).To(Equal(http.StatusInternalServerError)) + + By("checking the good endpoint and expecting it to succeed") + resp = requestTo(handler, "/okcheck") + Expect(resp.Code).To(Equal(http.StatusOK)) + }) + + It("should 
return non-found for paths that don't match a checker", func() { + handler := &healthz.Handler{} + + resp := requestTo(handler, "/doesnotexist") + Expect(resp.Code).To(Equal(http.StatusNotFound)) + }) + + It("should always return a ping endpoint if no other ones are present", func() { + resp := requestTo(&healthz.Handler{}, "/ping") + Expect(resp.Code).To(Equal(http.StatusOK)) + }) + }) +}) diff --git a/pkg/internal/controller/controller.go b/pkg/internal/controller/controller.go index 5e07d298bd..7dd06957eb 100644 --- a/pkg/internal/controller/controller.go +++ b/pkg/internal/controller/controller.go @@ -17,33 +17,79 @@ limitations under the License. package controller import ( + "context" + "errors" "fmt" "sync" + "sync/atomic" "time" - "k8s.io/apimachinery/pkg/runtime" + "github.com/go-logr/logr" + "golang.org/x/sync/errgroup" + "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/record" + "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/client-go/util/workqueue" - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/handler" + "k8s.io/utils/ptr" + + "sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics" - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/source" ) -var log = logf.RuntimeLog.WithName("controller") +// Options are the arguments for creating a new Controller. 
+type Options[request comparable] struct { + // Reconciler is a function that can be called at any time with the Name / Namespace of an object and + // ensures that the state of the system matches the state specified in the object. + // Defaults to the DefaultReconcileFunc. + Do reconcile.TypedReconciler[request] + + // RateLimiter is used to limit how frequently requests may be queued into the work queue. + RateLimiter workqueue.TypedRateLimiter[request] + + // NewQueue constructs the queue for this controller once the controller is ready to start. + // This is a func because the standard Kubernetes work queues start themselves immediately, which + // leads to goroutine leaks if something calls controller.New repeatedly. + NewQueue func(controllerName string, rateLimiter workqueue.TypedRateLimiter[request]) workqueue.TypedRateLimitingInterface[request] + + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. + MaxConcurrentReconciles int + + // CacheSyncTimeout refers to the time limit set on waiting for cache to sync + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required. + Name string + + // LogConstructor is used to construct a logger to then log messages to users during reconciliation, + // or for example when a watch is started. + // Note: LogConstructor has to be able to handle nil requests as we are also using it + // outside the context of a reconciliation. + LogConstructor func(request *request) logr.Logger -var _ inject.Injector = &Controller{} + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + // Defaults to true. + RecoverPanic *bool -// Controller implements controller.Controller -type Controller struct { + // LeaderElected indicates whether the controller is leader elected or always running. 
+ LeaderElected *bool + + // EnableWarmup specifies whether the controller should start its sources + // when the manager is not the leader. + // Defaults to false, which means that the controller will wait for leader election to start + // before starting sources. + EnableWarmup *bool + + // ReconciliationTimeout is used as the timeout passed to the context of each Reconcile call. + // By default, there is no timeout. + ReconciliationTimeout time.Duration +} + +// Controller implements controller.Controller. +type Controller[request comparable] struct { // Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required. Name string @@ -53,133 +99,309 @@ type Controller struct { // Reconciler is a function that can be called at any time with the Name / Namespace of an object and // ensures that the state of the system matches the state specified in the object. // Defaults to the DefaultReconcileFunc. - Do reconcile.Reconciler + Do reconcile.TypedReconciler[request] - // Client is a lazily initialized Client. The controllerManager will initialize this when Start is called. - Client client.Client + // RateLimiter is used to limit how frequently requests may be queued into the work queue. + RateLimiter workqueue.TypedRateLimiter[request] - // Scheme is injected by the controllerManager when controllerManager.Start is called - Scheme *runtime.Scheme - - // informers are injected by the controllerManager when controllerManager.Start is called - Cache cache.Cache - - // Config is the rest.Config used to talk to the apiserver. Defaults to one of in-cluster, environment variable - // specified, or the ~/.kube/Config. - Config *rest.Config + // NewQueue constructs the queue for this controller once the controller is ready to start. + // This is a func because the standard Kubernetes work queues start themselves immediately, which + // leads to goroutine leaks if something calls controller.New repeatedly. 
+ NewQueue func(controllerName string, rateLimiter workqueue.TypedRateLimiter[request]) workqueue.TypedRateLimitingInterface[request] // Queue is an listeningQueue that listens for events from Informers and adds object keys to // the Queue for processing - Queue workqueue.RateLimitingInterface - - // SetFields is used to inject dependencies into other objects such as Sources, EventHandlers and Predicates - SetFields func(i interface{}) error + Queue priorityqueue.PriorityQueue[request] // mu is used to synchronize Controller setup mu sync.Mutex - // JitterPeriod allows tests to reduce the JitterPeriod so they complete faster - JitterPeriod time.Duration - - // WaitForCacheSync allows tests to mock out the WaitForCacheSync function to return an error - // defaults to Cache.WaitForCacheSync - WaitForCacheSync func(stopCh <-chan struct{}) bool - // Started is true if the Controller has been Started Started bool - // Recorder is an event recorder for recording Event resources to the - // Kubernetes API. - Recorder record.EventRecorder + // ctx is the context that was passed to Start() and used when starting watches. + // + // According to the docs, contexts should not be stored in a struct: https://golang.org/pkg/context, + // while we usually always strive to follow best practices, we consider this a legacy case and it should + // undergo a major refactoring and redesign to allow for context to not be stored in a struct. + ctx context.Context + + // CacheSyncTimeout refers to the time limit set on waiting for cache to sync + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // startWatches maintains a list of sources, handlers, and predicates to start when the controller is started. + startWatches []source.TypedSource[request] + + // startedEventSourcesAndQueue is used to track if the event sources have been started. 
+ // It ensures that we append sources to c.startWatches only until we call Start() / Warmup() + // It is true if startEventSourcesAndQueueLocked has been called at least once. + startedEventSourcesAndQueue bool + + // didStartEventSourcesOnce is used to ensure that the event sources are only started once. + didStartEventSourcesOnce sync.Once + + // LogConstructor is used to construct a logger to then log messages to users during reconciliation, + // or for example when a watch is started. + // Note: LogConstructor has to be able to handle nil requests as we are also using it + // outside the context of a reconciliation. + LogConstructor func(request *request) logr.Logger + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + // Defaults to true. + RecoverPanic *bool + + // LeaderElected indicates whether the controller is leader elected or always running. + LeaderElected *bool + + // EnableWarmup specifies whether the controller should start its sources when the manager is not + // the leader. This is useful for cases where sources take a long time to start, as it allows + // for the controller to warm up its caches even before it is elected as the leader. This + // improves leadership failover time, as the caches will be prepopulated before the controller + // transitions to be leader. + // + // Setting EnableWarmup to true and NeedLeaderElection to true means the controller will start its + // sources without waiting to become leader. + // Setting EnableWarmup to true and NeedLeaderElection to false is a no-op as controllers without + // leader election do not wait on leader election to start their sources. + // Defaults to false. + EnableWarmup *bool + + ReconciliationTimeout time.Duration +} - // TODO(community): Consider initializing a logger with the Controller Name as the tag +// New returns a new Controller configured with the given options. 
+func New[request comparable](options Options[request]) *Controller[request] { + return &Controller[request]{ + Do: options.Do, + RateLimiter: options.RateLimiter, + NewQueue: options.NewQueue, + MaxConcurrentReconciles: options.MaxConcurrentReconciles, + CacheSyncTimeout: options.CacheSyncTimeout, + Name: options.Name, + LogConstructor: options.LogConstructor, + RecoverPanic: options.RecoverPanic, + LeaderElected: options.LeaderElected, + EnableWarmup: options.EnableWarmup, + ReconciliationTimeout: options.ReconciliationTimeout, + } } -// Reconcile implements reconcile.Reconciler -func (c *Controller) Reconcile(r reconcile.Request) (reconcile.Result, error) { - return c.Do.Reconcile(r) +// Reconcile implements reconcile.Reconciler. +func (c *Controller[request]) Reconcile(ctx context.Context, req request) (_ reconcile.Result, err error) { + defer func() { + if r := recover(); r != nil { + ctrlmetrics.ReconcilePanics.WithLabelValues(c.Name).Inc() + + if c.RecoverPanic == nil || *c.RecoverPanic { + for _, fn := range utilruntime.PanicHandlers { + fn(ctx, r) + } + err = fmt.Errorf("panic: %v [recovered]", r) + return + } + + log := logf.FromContext(ctx) + log.Info(fmt.Sprintf("Observed a panic in reconciler: %v", r)) + panic(r) + } + }() + + if c.ReconciliationTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, c.ReconciliationTimeout) + defer cancel() + } + + return c.Do.Reconcile(ctx, req) } -// Watch implements controller.Controller -func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prct ...predicate.Predicate) error { +// Watch implements controller.Controller. +func (c *Controller[request]) Watch(src source.TypedSource[request]) error { c.mu.Lock() defer c.mu.Unlock() - // Inject Cache into arguments - if err := c.SetFields(src); err != nil { - return err + // Sources weren't started yet, store the watches locally and return. + // These sources are going to be held until either Warmup() or Start(...) 
is called. + if !c.startedEventSourcesAndQueue { + c.startWatches = append(c.startWatches, src) + return nil } - if err := c.SetFields(evthdler); err != nil { - return err + + c.LogConstructor(nil).Info("Starting EventSource", "source", src) + return src.Start(c.ctx, c.Queue) +} + +// NeedLeaderElection implements the manager.LeaderElectionRunnable interface. +func (c *Controller[request]) NeedLeaderElection() bool { + if c.LeaderElected == nil { + return true } - for _, pr := range prct { - if err := c.SetFields(pr); err != nil { - return err - } + return *c.LeaderElected +} + +// Warmup implements the manager.WarmupRunnable interface. +func (c *Controller[request]) Warmup(ctx context.Context) error { + if c.EnableWarmup == nil || !*c.EnableWarmup { + return nil } - log.Info("Starting EventSource", "controller", c.Name, "source", src) - return src.Start(evthdler, c.Queue, prct...) + c.mu.Lock() + defer c.mu.Unlock() + + // Set the ctx so later calls to watch use this internal context + c.ctx = ctx + + return c.startEventSourcesAndQueueLocked(ctx) } -// Start implements controller.Controller -func (c *Controller) Start(stop <-chan struct{}) error { +// Start implements controller.Controller. +func (c *Controller[request]) Start(ctx context.Context) error { + // use an IIFE to get proper lock handling + // but lock outside to get proper handling of the queue shutdown c.mu.Lock() + if c.Started { + return errors.New("controller was started more than once. This is likely to be caused by being added to a manager multiple times") + } - // TODO(pwittrock): Reconsider HandleCrash - defer utilruntime.HandleCrash() - defer c.Queue.ShutDown() + c.initMetrics() - // Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches - log.Info("Starting Controller", "controller", c.Name) + // Set the internal context. 
+ c.ctx = ctx - // Wait for the caches to be synced before starting workers - if c.WaitForCacheSync == nil { - c.WaitForCacheSync = c.Cache.WaitForCacheSync - } - if ok := c.WaitForCacheSync(stop); !ok { - // This code is unreachable right now since WaitForCacheSync will never return an error - // Leaving it here because that could happen in the future - err := fmt.Errorf("failed to wait for %s caches to sync", c.Name) - log.Error(err, "Could not wait for Cache to sync", "controller", c.Name) - c.mu.Unlock() + wg := &sync.WaitGroup{} + err := func() error { + defer c.mu.Unlock() + + // TODO(pwittrock): Reconsider HandleCrash + defer utilruntime.HandleCrashWithLogger(c.LogConstructor(nil)) + + // NB(directxman12): launch the sources *before* trying to wait for the + // caches to sync so that they have a chance to register their intended + // caches. + if err := c.startEventSourcesAndQueueLocked(ctx); err != nil { + return err + } + + c.LogConstructor(nil).Info("Starting Controller") + + // Launch workers to process resources + c.LogConstructor(nil).Info("Starting workers", "worker count", c.MaxConcurrentReconciles) + wg.Add(c.MaxConcurrentReconciles) + for i := 0; i < c.MaxConcurrentReconciles; i++ { + go func() { + defer wg.Done() + // Run a worker thread that just dequeues items, processes them, and marks them done. + // It enforces that the reconcileHandler is never invoked concurrently with the same object. 
+ for c.processNextWorkItem(ctx) { + } + }() + } + + c.Started = true + return nil + }() + if err != nil { return err } - if c.JitterPeriod == 0 { - c.JitterPeriod = 1 * time.Second - } + <-ctx.Done() + c.LogConstructor(nil).Info("Shutdown signal received, waiting for all workers to finish") + wg.Wait() + c.LogConstructor(nil).Info("All workers finished") + return nil +} - // Launch workers to process resources - log.Info("Starting workers", "controller", c.Name, "worker count", c.MaxConcurrentReconciles) - for i := 0; i < c.MaxConcurrentReconciles; i++ { - // Process work items - go wait.Until(func() { - for c.processNextWorkItem() { +// startEventSourcesAndQueueLocked launches all the sources registered with this controller and waits +// for them to sync. It returns an error if any of the sources fail to start or sync. +func (c *Controller[request]) startEventSourcesAndQueueLocked(ctx context.Context) error { + var retErr error + + c.didStartEventSourcesOnce.Do(func() { + queue := c.NewQueue(c.Name, c.RateLimiter) + if priorityQueue, isPriorityQueue := queue.(priorityqueue.PriorityQueue[request]); isPriorityQueue { + c.Queue = priorityQueue + } else { + c.Queue = &priorityQueueWrapper[request]{TypedRateLimitingInterface: queue} + } + go func() { + <-ctx.Done() + c.Queue.ShutDown() + }() + + errGroup := &errgroup.Group{} + for _, watch := range c.startWatches { + log := c.LogConstructor(nil) + _, ok := watch.(interface { + String() string + }) + if !ok { + log = log.WithValues("source", fmt.Sprintf("%T", watch)) + } else { + log = log.WithValues("source", fmt.Sprintf("%s", watch)) } - }, c.JitterPeriod, stop) - } + didStartSyncingSource := &atomic.Bool{} + errGroup.Go(func() error { + // Use a timeout for starting and syncing the source to avoid silently + // blocking startup indefinitely if it doesn't come up. 
+ sourceStartCtx, cancel := context.WithTimeout(ctx, c.CacheSyncTimeout) + defer cancel() + + sourceStartErrChan := make(chan error, 1) // Buffer chan to not leak goroutine if we time out + go func() { + defer close(sourceStartErrChan) + log.Info("Starting EventSource") + + if err := watch.Start(ctx, c.Queue); err != nil { + sourceStartErrChan <- err + return + } + syncingSource, ok := watch.(source.TypedSyncingSource[request]) + if !ok { + return + } + didStartSyncingSource.Store(true) + if err := syncingSource.WaitForSync(sourceStartCtx); err != nil { + err := fmt.Errorf("failed to wait for %s caches to sync %v: %w", c.Name, syncingSource, err) + log.Error(err, "Could not wait for Cache to sync") + sourceStartErrChan <- err + } + }() + + select { + case err := <-sourceStartErrChan: + return err + case <-sourceStartCtx.Done(): + if didStartSyncingSource.Load() { // We are racing with WaitForSync, wait for it to let it tell us what happened + return <-sourceStartErrChan + } + if ctx.Err() != nil { // Don't return an error if the root context got cancelled + return nil + } + return fmt.Errorf("timed out waiting for source %s to Start. Please ensure that its Start() method is non-blocking", watch) + } + }) + } + retErr = errGroup.Wait() - c.Started = true - c.mu.Unlock() + // All the watches have been started, we can reset the local slice. + // + // We should never hold watches more than necessary, each watch source can hold a backing cache, + // which won't be garbage collected if we hold a reference to it. + c.startWatches = nil - <-stop - log.Info("Stopping workers", "controller", c.Name) - return nil + // Mark event sources as started after resetting the startWatches slice so that watches from + // a new Watch() call are immediately started. + c.startedEventSourcesAndQueue = true + }) + + return retErr } // processNextWorkItem will read a single work item off the workqueue and -// attempt to process it, by calling the syncHandler. 
-func (c *Controller) processNextWorkItem() bool { - // This code copy-pasted from the sample-Controller. - - // Update metrics after processing each item - reconcileStartTS := time.Now() - defer func() { - c.updateMetrics(time.Now().Sub(reconcileStartTS)) - }() - - obj, shutdown := c.Queue.Get() +// attempt to process it, by calling the reconcileHandler. +func (c *Controller[request]) processNextWorkItem(ctx context.Context) bool { + obj, priority, shutdown := c.Queue.GetWithPriority() if shutdown { // Stop working return false @@ -192,61 +414,135 @@ func (c *Controller) processNextWorkItem() bool { // put back on the workqueue and attempted again after a back-off // period. defer c.Queue.Done(obj) - var req reconcile.Request - var ok bool - if req, ok = obj.(reconcile.Request); !ok { - // As the item in the workqueue is actually invalid, we call - // Forget here else we'd go into a loop of attempting to - // process a work item that is invalid. - c.Queue.Forget(obj) - log.Error(nil, "Queue item was not a Request", - "controller", c.Name, "type", fmt.Sprintf("%T", obj), "value", obj) - // Return true, don't take a break - return true - } - // RunInformersAndControllers the syncHandler, passing it the namespace/Name string of the + ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(1) + defer ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(-1) + + c.reconcileHandler(ctx, obj, priority) + return true +} + +const ( + labelError = "error" + labelRequeueAfter = "requeue_after" + labelRequeue = "requeue" + labelSuccess = "success" +) + +func (c *Controller[request]) initMetrics() { + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Add(0) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Add(0) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Add(0) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Add(0) + ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Add(0) + 
ctrlmetrics.TerminalReconcileErrors.WithLabelValues(c.Name).Add(0) + ctrlmetrics.ReconcilePanics.WithLabelValues(c.Name).Add(0) + ctrlmetrics.WorkerCount.WithLabelValues(c.Name).Set(float64(c.MaxConcurrentReconciles)) + ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Set(0) +} + +func (c *Controller[request]) reconcileHandler(ctx context.Context, req request, priority int) { + // Update metrics after processing each item + reconcileStartTS := time.Now() + defer func() { + c.updateMetrics(time.Since(reconcileStartTS)) + }() + + log := c.LogConstructor(&req) + reconcileID := uuid.NewUUID() + + log = log.WithValues("reconcileID", reconcileID) + ctx = logf.IntoContext(ctx, log) + ctx = addReconcileID(ctx, reconcileID) + + // RunInformersAndControllers the syncHandler, passing it the Namespace/Name string of the // resource to be synced. - if result, err := c.Do.Reconcile(req); err != nil { - c.Queue.AddRateLimited(req) - log.Error(err, "Reconciler error", "controller", c.Name, "request", req) + log.V(5).Info("Reconciling") + result, err := c.Reconcile(ctx, req) + if result.Priority != nil { + priority = *result.Priority + } + switch { + case err != nil: + if errors.Is(err, reconcile.TerminalError(nil)) { + ctrlmetrics.TerminalReconcileErrors.WithLabelValues(c.Name).Inc() + } else { + c.Queue.AddWithOpts(priorityqueue.AddOpts{RateLimited: true, Priority: ptr.To(priority)}, req) + } ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc() - ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "error").Inc() - return false - } else if result.RequeueAfter > 0 { + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc() + if result.RequeueAfter > 0 || result.Requeue { //nolint: staticcheck // We have to handle Requeue until it is removed + log.Info("Warning: Reconciler returned both a result with either RequeueAfter or Requeue set and a non-nil error. RequeueAfter and Requeue will always be ignored if the error is non-nil. 
For more details, see: https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler") + } + log.Error(err, "Reconciler error") + case result.RequeueAfter > 0: + log.V(5).Info(fmt.Sprintf("Reconcile done, requeueing after %s", result.RequeueAfter)) // The result.RequeueAfter request will be lost, if it is returned // along with a non-nil error. But this is intended as // We need to drive to stable reconcile loops before queuing due // to result.RequestAfter - c.Queue.Forget(obj) - c.Queue.AddAfter(req, result.RequeueAfter) - ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "requeue_after").Inc() - return true - } else if result.Requeue { - c.Queue.AddRateLimited(req) - ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "requeue").Inc() - return true + c.Queue.Forget(req) + c.Queue.AddWithOpts(priorityqueue.AddOpts{After: result.RequeueAfter, Priority: ptr.To(priority)}, req) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Inc() + case result.Requeue: //nolint: staticcheck // We have to handle it until it is removed + log.V(5).Info("Reconcile done, requeueing") + c.Queue.AddWithOpts(priorityqueue.AddOpts{RateLimited: true, Priority: ptr.To(priority)}, req) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Inc() + default: + log.V(5).Info("Reconcile successful") + // Finally, if no error occurs we Forget this item so it does not + // get queued again until another change happens. + c.Queue.Forget(req) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Inc() } +} - // Finally, if no error occurs we Forget this item so it does not - // get queued again until another change happens. - c.Queue.Forget(obj) +// GetLogger returns this controller's logger. +func (c *Controller[request]) GetLogger() logr.Logger { + return c.LogConstructor(nil) +} - // TODO(directxman12): What does 1 mean? Do we want level constants? Do we want levels at all? 
- log.V(1).Info("Successfully Reconciled", "controller", c.Name, "request", req) +// updateMetrics updates prometheus metrics within the controller. +func (c *Controller[request]) updateMetrics(reconcileTime time.Duration) { + ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds()) +} - ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, "success").Inc() - // Return true, don't take a break - return true +// ReconcileIDFromContext gets the reconcileID from the current context. +func ReconcileIDFromContext(ctx context.Context) types.UID { + r, ok := ctx.Value(reconcileIDKey{}).(types.UID) + if !ok { + return "" + } + + return r } -// InjectFunc implement SetFields.Injector -func (c *Controller) InjectFunc(f inject.Func) error { - c.SetFields = f - return nil +// reconcileIDKey is a context.Context Value key. Its associated value should +// be a types.UID. +type reconcileIDKey struct{} + +func addReconcileID(ctx context.Context, reconcileID types.UID) context.Context { + return context.WithValue(ctx, reconcileIDKey{}, reconcileID) } -// updateMetrics updates prometheus metrics within the controller -func (c *Controller) updateMetrics(reconcileTime time.Duration) { - ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds()) +type priorityQueueWrapper[request comparable] struct { + workqueue.TypedRateLimitingInterface[request] +} + +func (p *priorityQueueWrapper[request]) AddWithOpts(opts priorityqueue.AddOpts, items ...request) { + for _, item := range items { + switch { + case opts.RateLimited: + p.TypedRateLimitingInterface.AddRateLimited(item) + case opts.After > 0: + p.TypedRateLimitingInterface.AddAfter(item, opts.After) + default: + p.TypedRateLimitingInterface.Add(item) + } + } +} + +func (p *priorityQueueWrapper[request]) GetWithPriority() (request, int, bool) { + item, shutdown := p.TypedRateLimitingInterface.Get() + return item, 0, shutdown } diff --git a/pkg/internal/controller/controller_suite_test.go 
b/pkg/internal/controller/controller_suite_test.go index 634852a53e..3143d3dd74 100644 --- a/pkg/internal/controller/controller_suite_test.go +++ b/pkg/internal/controller/controller_suite_test.go @@ -19,7 +19,7 @@ package controller import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -30,15 +30,15 @@ import ( func TestSource(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Controller Integration Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Controller internal Suite") } var testenv *envtest.Environment var cfg *rest.Config var clientset *kubernetes.Clientset -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) testenv = &envtest.Environment{} @@ -48,9 +48,7 @@ var _ = BeforeSuite(func(done Done) { clientset, err = kubernetes.NewForConfig(cfg) Expect(err).NotTo(HaveOccurred()) - - close(done) -}, 60) +}) var _ = AfterSuite(func() { Expect(testenv.Stop()).To(Succeed()) diff --git a/pkg/internal/controller/controller_test.go b/pkg/internal/controller/controller_test.go index f98d49764d..6d62b80e22 100644 --- a/pkg/internal/controller/controller_test.go +++ b/pkg/internal/controller/controller_test.go @@ -17,420 +17,936 @@ limitations under the License. package controller import ( + "context" + "errors" "fmt" + "sync" + "sync/atomic" "time" - . "github.com/onsi/ginkgo" + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" + "go.uber.org/goleak" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/cache/informertest" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + "sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics" - "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/leaderelection" + fakeleaderelection "sigs.k8s.io/controller-runtime/pkg/leaderelection/fake" + "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/reconcile/reconciletest" "sigs.k8s.io/controller-runtime/pkg/source" ) +type TestRequest struct { + Key string +} + +const testControllerName = "testcontroller" + var _ = Describe("controller", func() { - var fakeReconcile *reconciletest.FakeReconcile - var ctrl *Controller + var fakeReconcile *fakeReconciler + var ctrl *Controller[reconcile.Request] var queue *controllertest.Queue - var informers *informertest.FakeInformers - var stop chan struct{} var reconciled chan reconcile.Request var request = reconcile.Request{ NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}, } BeforeEach(func() { - stop = make(chan struct{}) reconciled = make(chan reconcile.Request) - fakeReconcile = &reconciletest.FakeReconcile{ - Chan: reconciled, + fakeReconcile = 
&fakeReconciler{ + Requests: reconciled, + results: make(chan fakeReconcileResultPair, 10 /* chosen by the completely scientific approach of guessing */), } queue = &controllertest.Queue{ - Interface: workqueue.New(), + TypedInterface: workqueue.NewTyped[reconcile.Request](), } - informers = &informertest.FakeInformers{} - ctrl = &Controller{ + ctrl = New[reconcile.Request](Options[reconcile.Request]{ MaxConcurrentReconciles: 1, Do: fakeReconcile, - Queue: queue, - Cache: informers, - } - Expect(ctrl.InjectFunc(func(interface{}) error { return nil })).To(Succeed()) - }) - - AfterEach(func() { - close(stop) + NewQueue: func(string, workqueue.TypedRateLimiter[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] { + return queue + }, + LogConstructor: func(_ *reconcile.Request) logr.Logger { + return log.RuntimeLog.WithName("controller").WithName("test") + }, + }) }) Describe("Reconciler", func() { - It("should call the Reconciler function", func() { - ctrl.Do = reconcile.Func(func(reconcile.Request) (reconcile.Result, error) { + It("should call the Reconciler function", func(ctx SpecContext) { + ctrl.Do = reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { return reconcile.Result{Requeue: true}, nil }) - result, err := ctrl.Reconcile( + result, err := ctrl.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}}) Expect(err).NotTo(HaveOccurred()) Expect(result).To(Equal(reconcile.Result{Requeue: true})) }) + + It("should not recover panic if RecoverPanic is false", func(ctx SpecContext) { + defer func() { + Expect(recover()).ShouldNot(BeNil()) + }() + ctrl.RecoverPanic = ptr.To(false) + ctrl.Do = reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + var res *reconcile.Result + return *res, nil + }) + _, _ = ctrl.Reconcile(ctx, + reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}}) + }) + + It("should 
recover panic if RecoverPanic is true by default", func(ctx SpecContext) { + defer func() { + Expect(recover()).To(BeNil()) + }() + // RecoverPanic defaults to true. + ctrl.Do = reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + var res *reconcile.Result + return *res, nil + }) + _, err := ctrl.Reconcile(ctx, + reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}}) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("[recovered]")) + }) + + It("should recover panic if RecoverPanic is true", func(ctx SpecContext) { + defer func() { + Expect(recover()).To(BeNil()) + }() + ctrl.RecoverPanic = ptr.To(true) + ctrl.Do = reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) { + var res *reconcile.Result + return *res, nil + }) + _, err := ctrl.Reconcile(ctx, + reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}}) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("[recovered]")) + }) + + It("should time out if ReconciliationTimeout is set", func(ctx SpecContext) { + ctrl.ReconciliationTimeout = time.Duration(1) // One nanosecond + ctrl.Do = reconcile.Func(func(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) { + <-ctx.Done() + return reconcile.Result{}, ctx.Err() + }) + _, err := ctrl.Reconcile(ctx, + reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "foo", Name: "bar"}}) + Expect(err).To(HaveOccurred()) + Expect(err).To(Equal(context.DeadlineExceeded)) + }) + + It("should not configure a timeout if ReconciliationTimeout is zero", func(ctx SpecContext) { + ctrl.Do = reconcile.Func(func(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) { + defer GinkgoRecover() + + _, ok := ctx.Deadline() + Expect(ok).To(BeFalse()) + return reconcile.Result{}, nil + }) + _, err := ctrl.Reconcile(ctx, + reconcile.Request{NamespacedName: 
types.NamespacedName{Namespace: "foo", Name: "bar"}}) + Expect(err).NotTo(HaveOccurred()) + }) }) Describe("Start", func() { - It("should return an error if there is an error waiting for the informers", func(done Done) { - ctrl.WaitForCacheSync = func(<-chan struct{}) bool { return false } + It("should return an error if there is an error waiting for the informers", func(ctx SpecContext) { + ctrl.CacheSyncTimeout = time.Second + f := false + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Kind(&informertest.FakeInformers{Synced: &f}, &corev1.Pod{}, &handler.TypedEnqueueRequestForObject[*corev1.Pod]{}), + } ctrl.Name = "foo" - err := ctrl.Start(stop) + err := ctrl.Start(ctx) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("failed to wait for foo caches to sync")) + }) + + It("should error when cache sync timeout occurs", func(ctx SpecContext) { + c, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + c = &cacheWithIndefinitelyBlockingGetInformer{c} + + ctrl.CacheSyncTimeout = time.Second + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Kind(c, &appsv1.Deployment{}, &handler.TypedEnqueueRequestForObject[*appsv1.Deployment]{}), + } + ctrl.Name = testControllerName - close(done) + err = ctrl.Start(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to wait for testcontroller caches to sync kind source: *v1.Deployment: timed out waiting for cache to be synced")) }) - It("should wait for each informer to sync", func(done Done) { - // Use a stopped channel so Start doesn't block - stopped := make(chan struct{}) - close(stopped) + It("should not error when controller Start context is cancelled during Sources WaitForSync", func(specCtx SpecContext) { + ctrl.CacheSyncTimeout = 1 * time.Second + sourceSynced := make(chan struct{}) c, err := cache.New(cfg, cache.Options{}) Expect(err).NotTo(HaveOccurred()) - _, err = 
c.GetInformer(&appsv1.Deployment{}) - Expect(err).NotTo(HaveOccurred()) - _, err = c.GetInformer(&appsv1.ReplicaSet{}) - Expect(err).NotTo(HaveOccurred()) - ctrl.Cache = c - ctrl.WaitForCacheSync = func(<-chan struct{}) bool { return true } + c = &cacheWithIndefinitelyBlockingGetInformer{c} + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + &singnallingSourceWrapper{ + SyncingSource: source.Kind[client.Object](c, &appsv1.Deployment{}, &handler.EnqueueRequestForObject{}), + cacheSyncDone: sourceSynced, + }, + } + ctrl.Name = testControllerName - Expect(ctrl.Start(stopped)).NotTo(HaveOccurred()) + ctx, cancel := context.WithCancel(specCtx) + go func() { + defer GinkgoRecover() + err = ctrl.Start(ctx) + Expect(err).To(Succeed()) + }() - close(done) + cancel() + <-sourceSynced }) - }) - Describe("Watch", func() { - It("should inject dependencies into the Source", func() { - src := &source.Kind{Type: &corev1.Pod{}} - Expect(src.InjectCache(ctrl.Cache)).To(Succeed()) - evthdl := &handler.EnqueueRequestForObject{} - found := false - ctrl.SetFields = func(i interface{}) error { - defer GinkgoRecover() - if i == src { - found = true - } - return nil - } - Expect(ctrl.Watch(src, evthdl)).NotTo(HaveOccurred()) - Expect(found).To(BeTrue(), "Source not injected") + It("should error when Start() is blocking forever", func(specCtx SpecContext) { + ctrl.CacheSyncTimeout = time.Second + + controllerDone := make(chan struct{}) + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + <-controllerDone + return ctx.Err() + })} + + ctx, cancel := context.WithTimeout(specCtx, 10*time.Second) + defer cancel() + + err := ctrl.Start(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("Please ensure that its Start() method is non-blocking")) + + close(controllerDone) }) - It("should return an error if there is an error injecting into the 
Source", func() { - src := &source.Kind{Type: &corev1.Pod{}} - Expect(src.InjectCache(ctrl.Cache)).To(Succeed()) - evthdl := &handler.EnqueueRequestForObject{} - expected := fmt.Errorf("expect fail source") - ctrl.SetFields = func(i interface{}) error { + It("should not error when cache sync timeout is of sufficiently high", func(ctx SpecContext) { + ctrl.CacheSyncTimeout = 10 * time.Second + + sourceSynced := make(chan struct{}) + c := &informertest.FakeInformers{} + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + &singnallingSourceWrapper{ + SyncingSource: source.Kind[client.Object](c, &appsv1.Deployment{}, &handler.EnqueueRequestForObject{}), + cacheSyncDone: sourceSynced, + }, + } + + go func() { defer GinkgoRecover() - if i == src { - return expected - } - return nil + Expect(ctrl.Start(ctx)).To(Succeed()) + }() + + <-sourceSynced + }) + + It("should process events from source.Channel", func(ctx SpecContext) { + ctrl.CacheSyncTimeout = 10 * time.Second + // channel to be closed when event is processed + processed := make(chan struct{}) + // source channel + ch := make(chan event.GenericEvent, 1) + + // event to be sent to the channel + p := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, } - Expect(ctrl.Watch(src, evthdl)).To(Equal(expected)) + evt := event.GenericEvent{ + Object: p, + } + + ins := source.Channel( + ch, + handler.Funcs{ + GenericFunc: func(ctx context.Context, evt event.GenericEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + close(processed) + }, + }, + ) + + // send the event to the channel + ch <- evt + + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ins} + + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).To(Succeed()) + }() + <-processed }) - It("should inject dependencies into the EventHandler", func() { - src := &source.Kind{Type: &corev1.Pod{}} - Expect(src.InjectCache(ctrl.Cache)).To(Succeed()) - evthdl := 
&handler.EnqueueRequestForObject{} - found := false - ctrl.SetFields = func(i interface{}) error { + It("should error when channel source is not specified", func(ctx SpecContext) { + ctrl.CacheSyncTimeout = 10 * time.Second + + ins := source.Channel[string](nil, nil) + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ins} + + e := ctrl.Start(ctx) + Expect(e).To(HaveOccurred()) + Expect(e.Error()).To(ContainSubstring("must specify Channel.Source")) + }) + + It("should call Start on sources with the appropriate EventHandler, Queue, and Predicates", func(specCtx SpecContext) { + ctrl.CacheSyncTimeout = 10 * time.Second + started := false + ctx, cancel := context.WithCancel(specCtx) + src := source.Func(func(ctx context.Context, q workqueue.TypedRateLimitingInterface[reconcile.Request]) error { defer GinkgoRecover() - if i == evthdl { - found = true - } + Expect(q).To(Equal(ctrl.Queue)) + + started = true + cancel() // Cancel the context so ctrl.Start() doesn't block forever return nil - } - Expect(ctrl.Watch(src, evthdl)).NotTo(HaveOccurred()) - Expect(found).To(BeTrue(), "EventHandler not injected") + }) + Expect(ctrl.Watch(src)).NotTo(HaveOccurred()) + + err := ctrl.Start(ctx) + Expect(err).To(Succeed()) + Expect(started).To(BeTrue()) }) - It("should return an error if there is an error injecting into the EventHandler", func() { - src := &source.Kind{Type: &corev1.Pod{}} - evthdl := &handler.EnqueueRequestForObject{} - expected := fmt.Errorf("expect fail eventhandler") - ctrl.SetFields = func(i interface{}) error { + It("should return an error if there is an error starting sources", func(ctx SpecContext) { + ctrl.CacheSyncTimeout = 10 * time.Second + err := fmt.Errorf("Expected Error: could not start source") + src := source.Func(func(context.Context, + workqueue.TypedRateLimitingInterface[reconcile.Request], + ) error { defer GinkgoRecover() - if i == evthdl { - return expected - } - return nil + return err + }) + Expect(ctrl.Watch(src)).To(Succeed()) + 
Expect(ctrl.Start(ctx)).To(Equal(err)) + }) + + It("should return an error if it gets started more than once", func(specCtx SpecContext) { + // Use a cancelled context so Start doesn't block + ctx, cancel := context.WithCancel(specCtx) + cancel() + Expect(ctrl.Start(ctx)).To(Succeed()) + err := ctrl.Start(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("controller was started more than once. This is likely to be caused by being added to a manager multiple times")) + }) + + It("should check for correct TypedSyncingSource if custom types are used", func(specCtx SpecContext) { + queue := &priorityQueueWrapper[TestRequest]{ + TypedRateLimitingInterface: &controllertest.TypedQueue[TestRequest]{ + TypedInterface: workqueue.NewTyped[TestRequest](), + }} + ctrl := New[TestRequest](Options[TestRequest]{ + NewQueue: func(string, workqueue.TypedRateLimiter[TestRequest]) workqueue.TypedRateLimitingInterface[TestRequest] { + return queue + }, + LogConstructor: func(*TestRequest) logr.Logger { + return log.RuntimeLog.WithName("controller").WithName("test") + }, + }) + ctrl.CacheSyncTimeout = time.Second + src := &bisignallingSource[TestRequest]{ + startCall: make(chan workqueue.TypedRateLimitingInterface[TestRequest]), + startDone: make(chan error, 1), + waitCall: make(chan struct{}), + waitDone: make(chan error, 1), } - Expect(ctrl.Watch(src, evthdl)).To(Equal(expected)) + ctrl.startWatches = []source.TypedSource[TestRequest]{src} + ctrl.Name = "foo" + ctx, cancel := context.WithCancel(specCtx) + defer cancel() + startCh := make(chan error) + go func() { + defer GinkgoRecover() + startCh <- ctrl.Start(ctx) + }() + Eventually(src.startCall).Should(Receive(Equal(queue))) + src.startDone <- nil + Eventually(src.waitCall).Should(BeClosed()) + src.waitDone <- nil + cancel() + Eventually(startCh).Should(Receive(Succeed())) }) + }) - PIt("should inject dependencies into the Reconciler", func() { - // TODO(community): Write this + 
Describe("startEventSourcesAndQueueLocked", func() { + It("should return nil when no sources are provided", func(ctx SpecContext) { + ctrl.startWatches = []source.TypedSource[reconcile.Request]{} + err := ctrl.startEventSourcesAndQueueLocked(ctx) + Expect(err).NotTo(HaveOccurred()) }) - PIt("should return an error if there is an error injecting into the Reconciler", func() { - // TODO(community): Write this + It("should initialize controller queue when called", func(ctx SpecContext) { + ctrl.startWatches = []source.TypedSource[reconcile.Request]{} + err := ctrl.startEventSourcesAndQueueLocked(ctx) + Expect(err).NotTo(HaveOccurred()) + Expect(ctrl.Queue).NotTo(BeNil()) }) - It("should inject dependencies into all of the Predicates", func() { - src := &source.Kind{Type: &corev1.Pod{}} - Expect(src.InjectCache(ctrl.Cache)).To(Succeed()) - evthdl := &handler.EnqueueRequestForObject{} - pr1 := &predicate.Funcs{} - pr2 := &predicate.Funcs{} - found1 := false - found2 := false - ctrl.SetFields = func(i interface{}) error { - defer GinkgoRecover() - if i == pr1 { - found1 = true - } - if i == pr2 { - found2 = true - } - return nil + It("should return an error if a source fails to start", func(ctx SpecContext) { + expectedErr := fmt.Errorf("failed to start source") + src := source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + // Return the error immediately so we don't get a timeout + return expectedErr + }) + + // Set a sufficiently long timeout to avoid timeouts interfering with the error being returned + ctrl.CacheSyncTimeout = 5 * time.Second + ctrl.startWatches = []source.TypedSource[reconcile.Request]{src} + err := ctrl.startEventSourcesAndQueueLocked(ctx) + Expect(err).To(Equal(expectedErr)) + }) + + It("should return an error if a source fails to sync", func(ctx SpecContext) { + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Kind(&informertest.FakeInformers{Synced: ptr.To(false)}, 
&corev1.Pod{}, &handler.TypedEnqueueRequestForObject[*corev1.Pod]{}), } - Expect(ctrl.Watch(src, evthdl, pr1, pr2)).NotTo(HaveOccurred()) - Expect(found1).To(BeTrue(), "First Predicated not injected") - Expect(found2).To(BeTrue(), "Second Predicated not injected") + ctrl.Name = "test-controller" + ctrl.CacheSyncTimeout = 5 * time.Second + + err := ctrl.startEventSourcesAndQueueLocked(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to wait for test-controller caches to sync")) }) - It("should return an error if there is an error injecting into any of the Predicates", func() { - src := &source.Kind{Type: &corev1.Pod{}} - Expect(src.InjectCache(ctrl.Cache)).To(Succeed()) - evthdl := &handler.EnqueueRequestForObject{} - pr1 := &predicate.Funcs{} - pr2 := &predicate.Funcs{} - expected := fmt.Errorf("expect fail predicate") - ctrl.SetFields = func(i interface{}) error { - defer GinkgoRecover() - if i == pr1 { - return expected - } - return nil + It("should not return an error when sources start and sync successfully", func(ctx SpecContext) { + // Create a source that starts and syncs successfully + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Kind(&informertest.FakeInformers{Synced: ptr.To(true)}, &corev1.Pod{}, &handler.TypedEnqueueRequestForObject[*corev1.Pod]{}), } - Expect(ctrl.Watch(src, evthdl, pr1, pr2)).To(Equal(expected)) + ctrl.Name = "test-controller" + ctrl.CacheSyncTimeout = 5 * time.Second - ctrl.SetFields = func(i interface{}) error { + err := ctrl.startEventSourcesAndQueueLocked(ctx) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should not return an error when context is cancelled during source sync", func(ctx SpecContext) { + sourceCtx, sourceCancel := context.WithCancel(ctx) + defer sourceCancel() + + ctrl.CacheSyncTimeout = 5 * time.Second + + // Create a bisignallingSource to control the test flow + src := &bisignallingSource[reconcile.Request]{ + startCall: make(chan 
workqueue.TypedRateLimitingInterface[reconcile.Request]), + startDone: make(chan error, 1), + waitCall: make(chan struct{}), + waitDone: make(chan error, 1), + } + + ctrl.startWatches = []source.TypedSource[reconcile.Request]{src} + + // Start the sources in a goroutine + startErrCh := make(chan error) + go func() { defer GinkgoRecover() - if i == pr2 { - return expected - } + startErrCh <- ctrl.startEventSourcesAndQueueLocked(sourceCtx) + }() + + // Allow source to start successfully + Eventually(src.startCall).Should(Receive()) + src.startDone <- nil + + // Wait for WaitForSync to be called + Eventually(src.waitCall).Should(BeClosed()) + + // Return context.Canceled from WaitForSync + src.waitDone <- context.Canceled + + // Also cancel the context + sourceCancel() + + // We expect to receive the context.Canceled error + err := <-startErrCh + Expect(err).To(MatchError(context.Canceled)) + }) + + It("should timeout if source Start blocks for too long", func(ctx SpecContext) { + ctrl.CacheSyncTimeout = 1 * time.Millisecond + + // Create a source that blocks forever in Start + blockingSrc := source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + <-ctx.Done() + return ctx.Err() + }) + + ctrl.startWatches = []source.TypedSource[reconcile.Request]{blockingSrc} + + err := ctrl.startEventSourcesAndQueueLocked(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("timed out waiting for source")) + }) + + It("should only start sources once when called multiple times concurrently", func(ctx SpecContext) { + ctrl.CacheSyncTimeout = 1 * time.Millisecond + + var startCount atomic.Int32 + src := source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + startCount.Add(1) return nil + }) + + ctrl.startWatches = []source.TypedSource[reconcile.Request]{src} + + By("Calling startEventSourcesAndQueueLocked multiple times in parallel") + var wg sync.WaitGroup + 
for i := 1; i <= 5; i++ { + wg.Add(1) + go func() { + defer wg.Done() + err := ctrl.startEventSourcesAndQueueLocked(ctx) + // All calls should return the same nil error + Expect(err).NotTo(HaveOccurred()) + }() } - Expect(ctrl.Watch(src, evthdl, pr1, pr2)).To(Equal(expected)) + + wg.Wait() + Expect(startCount.Load()).To(Equal(int32(1)), "Source should only be started once even when called multiple times") }) - It("should call Start the Source with the EventHandler, Queue, and Predicates", func() { - pr1 := &predicate.Funcs{} - pr2 := &predicate.Funcs{} - evthdl := &handler.EnqueueRequestForObject{} - src := source.Func(func(e handler.EventHandler, q workqueue.RateLimitingInterface, p ...predicate.Predicate) error { - defer GinkgoRecover() - Expect(e).To(Equal(evthdl)) - Expect(q).To(Equal(ctrl.Queue)) - Expect(p).To(ConsistOf(pr1, pr2)) + It("should block subsequent calls from returning until the first call to startEventSourcesAndQueueLocked has returned", func(ctx SpecContext) { + ctrl.CacheSyncTimeout = 5 * time.Second + + // finishSourceChan is closed to unblock startEventSourcesAndQueueLocked from returning + finishSourceChan := make(chan struct{}) + + src := source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + <-finishSourceChan return nil }) - Expect(ctrl.Watch(src, evthdl, pr1, pr2)).NotTo(HaveOccurred()) + ctrl.startWatches = []source.TypedSource[reconcile.Request]{src} - }) + By("Calling startEventSourcesAndQueueLocked asynchronously") + wg := sync.WaitGroup{} + go func() { + defer GinkgoRecover() + defer wg.Done() - It("should return an error if there is an error starting the Source", func() { - err := fmt.Errorf("Expected Error: could not start source") - src := source.Func(func(handler.EventHandler, - workqueue.RateLimitingInterface, - ...predicate.Predicate) error { + wg.Add(1) + Expect(ctrl.startEventSourcesAndQueueLocked(ctx)).To(Succeed()) + }() + + By("Calling startEventSourcesAndQueueLocked 
again") + var didSubsequentCallComplete atomic.Bool + go func() { defer GinkgoRecover() - return err + defer wg.Done() + + wg.Add(1) + Expect(ctrl.startEventSourcesAndQueueLocked(ctx)).To(Succeed()) + didSubsequentCallComplete.Store(true) + }() + + // Assert that second call to startEventSourcesAndQueueLocked is blocked while source has not finished + Consistently(didSubsequentCallComplete.Load).Should(BeFalse()) + + By("Finishing source start + sync") + finishSourceChan <- struct{}{} + + // Assert that second call to startEventSourcesAndQueueLocked is now complete + Eventually(didSubsequentCallComplete.Load).Should(BeTrue(), "startEventSourcesAndQueueLocked should complete after source is started and synced") + wg.Wait() + }) + + It("should reset c.startWatches to nil after returning and startedEventSourcesAndQueue", func(ctx SpecContext) { + ctrl.CacheSyncTimeout = 1 * time.Millisecond + + src := source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + return nil }) - Expect(ctrl.Watch(src, &handler.EnqueueRequestForObject{})).To(Equal(err)) + + ctrl.startWatches = []source.TypedSource[reconcile.Request]{src} + + err := ctrl.startEventSourcesAndQueueLocked(ctx) + Expect(err).NotTo(HaveOccurred()) + Expect(ctrl.startWatches).To(BeNil(), "startWatches should be reset to nil after returning") + Expect(ctrl.startedEventSourcesAndQueue).To(BeTrue(), "startedEventSourcesAndQueue should be set to true after startEventSourcesAndQueueLocked returns without error") }) }) Describe("Processing queue items from a Controller", func() { - It("should call Reconciler if an item is enqueued", func(done Done) { + It("should call Reconciler if an item is enqueued", func(ctx SpecContext) { go func() { defer GinkgoRecover() - Expect(ctrl.Start(stop)).NotTo(HaveOccurred()) + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) }() - ctrl.Queue.Add(request) + queue.Add(request) By("Invoking Reconciler") + 
fakeReconcile.AddResult(reconcile.Result{}, nil) Expect(<-reconciled).To(Equal(request)) By("Removing the item from the queue") - Eventually(ctrl.Queue.Len).Should(Equal(0)) - Eventually(func() int { return ctrl.Queue.NumRequeues(request) }).Should(Equal(0)) - - close(done) + Eventually(queue.Len).Should(Equal(0)) + Eventually(func() int { return queue.NumRequeues(request) }).Should(Equal(0)) }) - It("should continue to process additional queue items after the first", func(done Done) { - ctrl.Do = reconcile.Func(func(reconcile.Request) (reconcile.Result, error) { - defer GinkgoRecover() - Fail("Reconciler should not have been called") - return reconcile.Result{}, nil - }) + It("should requeue a Request if there is an error and continue processing items", func(ctx SpecContext) { go func() { defer GinkgoRecover() - Expect(ctrl.Start(stop)).NotTo(HaveOccurred()) + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) }() - ctrl.Queue.Add("foo/bar") - // Don't expect the string to reconciled - Expect(ctrl.processNextWorkItem()).To(BeTrue()) + queue.Add(request) - Eventually(ctrl.Queue.Len).Should(Equal(0)) - Eventually(func() int { return ctrl.Queue.NumRequeues(request) }).Should(Equal(0)) + By("Invoking Reconciler which will give an error") + fakeReconcile.AddResult(reconcile.Result{}, fmt.Errorf("expected error: reconcile")) + Expect(<-reconciled).To(Equal(request)) + queue.AddedRateLimitedLock.Lock() + Expect(queue.AddedRatelimited).To(Equal([]any{request})) + queue.AddedRateLimitedLock.Unlock() - close(done) - }) + By("Invoking Reconciler a second time without error") + fakeReconcile.AddResult(reconcile.Result{}, nil) + Expect(<-reconciled).To(Equal(request)) - PIt("should forget an item if it is not a Request and continue processing items", func() { - // TODO(community): write this test + By("Removing the item from the queue") + Eventually(queue.Len).Should(Equal(0)) + Eventually(func() int { return queue.NumRequeues(request) }, 1.0).Should(Equal(0)) }) - It("should 
requeue a Request if there is an error and continue processing items", func(done Done) { - fakeReconcile.Err = fmt.Errorf("expected error: reconcile") + It("should not requeue a Request if there is a terminal error", func(ctx SpecContext) { go func() { defer GinkgoRecover() - Expect(ctrl.Start(stop)).NotTo(HaveOccurred()) + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) }() - ctrl.Queue.Add(request) - // Reduce the jitterperiod so we don't have to wait a second before the reconcile function is rerun. - ctrl.JitterPeriod = time.Millisecond + queue.Add(request) By("Invoking Reconciler which will give an error") + fakeReconcile.AddResult(reconcile.Result{}, reconcile.TerminalError(fmt.Errorf("expected error: reconcile"))) Expect(<-reconciled).To(Equal(request)) - By("Invoking Reconciler a second time without error") - fakeReconcile.Err = nil + queue.AddedRateLimitedLock.Lock() + Expect(queue.AddedRatelimited).To(BeEmpty()) + queue.AddedRateLimitedLock.Unlock() + + Expect(queue.Len()).Should(Equal(0)) + }) + + // TODO(directxman12): we should ensure that backoff occurrs with error requeue + + It("should not reset backoff until there's a non-error result", func(ctx SpecContext) { + dq := &DelegatingQueue{TypedRateLimitingInterface: ctrl.NewQueue("controller1", nil)} + ctrl.NewQueue = func(string, workqueue.TypedRateLimiter[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] { + return dq + } + + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + + dq.Add(request) + Expect(dq.getCounts()).To(Equal(countInfo{Trying: 1})) + + By("Invoking Reconciler which returns an error") + fakeReconcile.AddResult(reconcile.Result{}, fmt.Errorf("something's wrong")) + Expect(<-reconciled).To(Equal(request)) + Eventually(dq.getCounts).Should(Equal(countInfo{Trying: 1, AddRateLimited: 1})) + + By("Invoking Reconciler a second time with an error") + fakeReconcile.AddResult(reconcile.Result{}, fmt.Errorf("another thing's 
wrong")) + Expect(<-reconciled).To(Equal(request)) + + Eventually(dq.getCounts).Should(Equal(countInfo{Trying: 1, AddRateLimited: 2})) + + By("Invoking Reconciler a third time, where it finally does not return an error") + fakeReconcile.AddResult(reconcile.Result{}, nil) Expect(<-reconciled).To(Equal(request)) + Eventually(dq.getCounts).Should(Equal(countInfo{Trying: 0, AddRateLimited: 2})) + By("Removing the item from the queue") - Eventually(ctrl.Queue.Len).Should(Equal(0)) - Eventually(func() int { return ctrl.Queue.NumRequeues(request) }).Should(Equal(0)) + Eventually(dq.Len).Should(Equal(0)) + Eventually(func() int { return dq.NumRequeues(request) }).Should(Equal(0)) + }) - close(done) - }, 1.0) + It("should requeue a Request with rate limiting if the Result sets Requeue:true and continue processing items", func(ctx SpecContext) { + dq := &DelegatingQueue{TypedRateLimitingInterface: ctrl.NewQueue("controller1", nil)} + ctrl.NewQueue = func(string, workqueue.TypedRateLimiter[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] { + return dq + } - It("should requeue a Request if the Result sets Requeue:true and continue processing items", func() { - fakeReconcile.Result.Requeue = true go func() { defer GinkgoRecover() - Expect(ctrl.Start(stop)).NotTo(HaveOccurred()) + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) }() - dq := &DelegatingQueue{RateLimitingInterface: ctrl.Queue} - ctrl.Queue = dq - ctrl.Queue.Add(request) - Expect(dq.countAdd).To(Equal(1)) - Expect(dq.countAddAfter).To(Equal(0)) - Expect(dq.countAddRateLimited).To(Equal(0)) + + dq.Add(request) + Expect(dq.getCounts()).To(Equal(countInfo{Trying: 1})) By("Invoking Reconciler which will ask for requeue") + fakeReconcile.AddResult(reconcile.Result{Requeue: true}, nil) Expect(<-reconciled).To(Equal(request)) - Expect(dq.countAdd).To(Equal(1)) - Expect(dq.countAddAfter).To(Equal(0)) - Expect(dq.countAddRateLimited).To(Equal(1)) + 
Eventually(dq.getCounts).Should(Equal(countInfo{Trying: 1, AddRateLimited: 1})) By("Invoking Reconciler a second time without asking for requeue") - fakeReconcile.Result.Requeue = false + fakeReconcile.AddResult(reconcile.Result{Requeue: false}, nil) Expect(<-reconciled).To(Equal(request)) - Expect(dq.countAdd).To(Equal(1)) - Expect(dq.countAddAfter).To(Equal(0)) - Expect(dq.countAddRateLimited).To(Equal(1)) + + Eventually(dq.getCounts).Should(Equal(countInfo{Trying: 0, AddRateLimited: 1})) By("Removing the item from the queue") - Eventually(ctrl.Queue.Len).Should(Equal(0)) - Eventually(func() int { return ctrl.Queue.NumRequeues(request) }).Should(Equal(0)) + Eventually(dq.Len).Should(Equal(0)) + Eventually(func() int { return dq.NumRequeues(request) }).Should(Equal(0)) }) - It("should requeue a Request after a duration if the Result sets Requeue:true and "+ - "RequeueAfter is set and forget the item", func() { - fakeReconcile.Result.RequeueAfter = time.Millisecond * 100 + It("should retain the priority when the reconciler requests a requeue", func(ctx SpecContext) { + q := &fakePriorityQueue{PriorityQueue: priorityqueue.New[reconcile.Request]("controller1")} + ctrl.NewQueue = func(string, workqueue.TypedRateLimiter[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] { + return q + } + go func() { defer GinkgoRecover() - Expect(ctrl.Start(stop)).NotTo(HaveOccurred()) + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) }() - dq := &DelegatingQueue{RateLimitingInterface: ctrl.Queue} - ctrl.Queue = dq - ctrl.Queue.Add(request) - Expect(dq.countAdd).To(Equal(1)) - Expect(dq.countAddAfter).To(Equal(0)) - Expect(dq.countAddRateLimited).To(Equal(0)) - By("Invoking Reconciler which will ask for requeue") + q.PriorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: ptr.To(10)}, request) + + By("Invoking Reconciler which will request a requeue") + fakeReconcile.AddResult(reconcile.Result{Requeue: true}, nil) Expect(<-reconciled).To(Equal(request)) - 
Expect(dq.countAdd).To(Equal(0)) - Expect(dq.countAddAfter).To(Equal(1)) - Expect(dq.countAddRateLimited).To(Equal(0)) + Eventually(func() []priorityQueueAddition { + q.lock.Lock() + defer q.lock.Unlock() + return q.added + }).Should(Equal([]priorityQueueAddition{{ + AddOpts: priorityqueue.AddOpts{ + RateLimited: true, + Priority: ptr.To(10), + }, + items: []reconcile.Request{request}, + }})) + }) - By("Invoking Reconciler a second time without asking for requeue") - fakeReconcile.Result.Requeue = false + It("should use the priority from Result when the reconciler requests a requeue", func(ctx SpecContext) { + q := &fakePriorityQueue{PriorityQueue: priorityqueue.New[reconcile.Request]("controller1")} + ctrl.NewQueue = func(string, workqueue.TypedRateLimiter[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] { + return q + } + + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + + q.PriorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: ptr.To(10)}, request) + + By("Invoking Reconciler which will request a requeue") + fakeReconcile.AddResult(reconcile.Result{Requeue: true, Priority: ptr.To(99)}, nil) + Expect(<-reconciled).To(Equal(request)) + Eventually(func() []priorityQueueAddition { + q.lock.Lock() + defer q.lock.Unlock() + return q.added + }).Should(Equal([]priorityQueueAddition{{ + AddOpts: priorityqueue.AddOpts{ + RateLimited: true, + Priority: ptr.To(99), + }, + items: []reconcile.Request{request}, + }})) + }) + + It("should requeue a Request after a duration (but not rate-limited) if the Result sets RequeueAfter (regardless of Requeue)", func(ctx SpecContext) { + dq := &DelegatingQueue{TypedRateLimitingInterface: ctrl.NewQueue("controller1", nil)} + ctrl.NewQueue = func(string, workqueue.TypedRateLimiter[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] { + return dq + } + + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() 
+ + dq.Add(request) + Expect(dq.getCounts()).To(Equal(countInfo{Trying: 1})) + + By("Invoking Reconciler which will ask for requeue & requeueafter") + fakeReconcile.AddResult(reconcile.Result{RequeueAfter: time.Millisecond * 100, Requeue: true}, nil) Expect(<-reconciled).To(Equal(request)) - Expect(dq.countAdd).To(Equal(0)) - Expect(dq.countAddAfter).To(Equal(1)) - Expect(dq.countAddRateLimited).To(Equal(0)) + Eventually(dq.getCounts).Should(Equal(countInfo{Trying: 0, AddAfter: 1})) + + By("Invoking Reconciler a second time asking for a requeueafter only") + fakeReconcile.AddResult(reconcile.Result{RequeueAfter: time.Millisecond * 100}, nil) + Expect(<-reconciled).To(Equal(request)) + + Eventually(dq.getCounts).Should(Equal(countInfo{Trying: -1 /* we don't increment the count in addafter */, AddAfter: 2})) By("Removing the item from the queue") - Eventually(ctrl.Queue.Len).Should(Equal(0)) - Eventually(func() int { return ctrl.Queue.NumRequeues(request) }).Should(Equal(0)) + Eventually(dq.Len).Should(Equal(0)) + Eventually(func() int { return dq.NumRequeues(request) }).Should(Equal(0)) + }) + + It("should retain the priority with RequeueAfter", func(ctx SpecContext) { + q := &fakePriorityQueue{PriorityQueue: priorityqueue.New[reconcile.Request]("controller1")} + ctrl.NewQueue = func(string, workqueue.TypedRateLimiter[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] { + return q + } + + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + + q.PriorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: ptr.To(10)}, request) + + By("Invoking Reconciler which will ask for RequeueAfter") + fakeReconcile.AddResult(reconcile.Result{RequeueAfter: time.Millisecond * 100}, nil) + Expect(<-reconciled).To(Equal(request)) + Eventually(func() []priorityQueueAddition { + q.lock.Lock() + defer q.lock.Unlock() + return q.added + }).Should(Equal([]priorityQueueAddition{{ + AddOpts: priorityqueue.AddOpts{ + After: 
time.Millisecond * 100, + Priority: ptr.To(10), + }, + items: []reconcile.Request{request}, + }})) }) - PIt("should not requeue a Request after a duration if the Result sets Requeue:true and "+ - "RequeueAfter is set and err is not nil", func() { + It("should use the priority from Result with RequeueAfter", func(ctx SpecContext) { + q := &fakePriorityQueue{PriorityQueue: priorityqueue.New[reconcile.Request]("controller1")} + ctrl.NewQueue = func(string, workqueue.TypedRateLimiter[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] { + return q + } - fakeReconcile.Result.RequeueAfter = time.Millisecond * 100 - fakeReconcile.Err = fmt.Errorf("expected error: reconcile") go func() { defer GinkgoRecover() - Expect(ctrl.Start(stop)).NotTo(HaveOccurred()) + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) }() - dq := &DelegatingQueue{RateLimitingInterface: ctrl.Queue} - ctrl.Queue = dq - ctrl.JitterPeriod = time.Millisecond - ctrl.Queue.Add(request) - Expect(dq.countAdd).To(Equal(1)) - Expect(dq.countAddAfter).To(Equal(0)) - Expect(dq.countAddRateLimited).To(Equal(0)) - By("Invoking Reconciler which will ask for requeue with an error") + q.PriorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: ptr.To(10)}, request) + + By("Invoking Reconciler which will ask for RequeueAfter") + fakeReconcile.AddResult(reconcile.Result{RequeueAfter: time.Millisecond * 100, Priority: ptr.To(99)}, nil) + Expect(<-reconciled).To(Equal(request)) + Eventually(func() []priorityQueueAddition { + q.lock.Lock() + defer q.lock.Unlock() + return q.added + }).Should(Equal([]priorityQueueAddition{{ + AddOpts: priorityqueue.AddOpts{ + After: time.Millisecond * 100, + Priority: ptr.To(99), + }, + items: []reconcile.Request{request}, + }})) + }) + + It("should perform error behavior if error is not nil, regardless of RequeueAfter", func(ctx SpecContext) { + dq := &DelegatingQueue{TypedRateLimitingInterface: ctrl.NewQueue("controller1", nil)} + ctrl.NewQueue = func(string, 
workqueue.TypedRateLimiter[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] { + return dq + } + + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + + dq.Add(request) + Expect(dq.getCounts()).To(Equal(countInfo{Trying: 1})) + + By("Invoking Reconciler which will ask for requeueafter with an error") + fakeReconcile.AddResult(reconcile.Result{RequeueAfter: time.Millisecond * 100}, fmt.Errorf("expected error: reconcile")) Expect(<-reconciled).To(Equal(request)) - Expect(dq.countAdd).To(Equal(1)) - Expect(dq.countAddAfter).To(Equal(0)) - Expect(dq.countAddRateLimited).To(Equal(1)) + Eventually(dq.getCounts).Should(Equal(countInfo{Trying: 1, AddRateLimited: 1})) - fakeReconcile.Result.RequeueAfter = time.Millisecond * 100 - fakeReconcile.Err = nil - By("Invoking Reconciler a second time asking for requeue without errors") + By("Invoking Reconciler a second time asking for requeueafter without errors") + fakeReconcile.AddResult(reconcile.Result{RequeueAfter: time.Millisecond * 100}, nil) Expect(<-reconciled).To(Equal(request)) - Expect(dq.countAdd).To(Equal(0)) - Expect(dq.countAddAfter).To(Equal(1)) - Expect(dq.countAddRateLimited).To(Equal(1)) + Eventually(dq.getCounts).Should(Equal(countInfo{AddAfter: 1, AddRateLimited: 1})) By("Removing the item from the queue") - Eventually(ctrl.Queue.Len).Should(Equal(0)) - Eventually(func() int { return ctrl.Queue.NumRequeues(request) }).Should(Equal(0)) + Eventually(dq.Len).Should(Equal(0)) + Eventually(func() int { return dq.NumRequeues(request) }).Should(Equal(0)) }) - PIt("should forget the Request if Reconciler is successful", func() { - // TODO(community): write this test + It("should retain the priority when there was an error", func(ctx SpecContext) { + q := &fakePriorityQueue{PriorityQueue: priorityqueue.New[reconcile.Request]("controller1")} + ctrl.NewQueue = func(string, workqueue.TypedRateLimiter[reconcile.Request]) 
workqueue.TypedRateLimitingInterface[reconcile.Request] { + return q + } + + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + + q.PriorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: ptr.To(10)}, request) + + By("Invoking Reconciler which will return an error") + fakeReconcile.AddResult(reconcile.Result{}, errors.New("oups, I did it again")) + Expect(<-reconciled).To(Equal(request)) + Eventually(func() []priorityQueueAddition { + q.lock.Lock() + defer q.lock.Unlock() + return q.added + }).Should(Equal([]priorityQueueAddition{{ + AddOpts: priorityqueue.AddOpts{ + RateLimited: true, + Priority: ptr.To(10), + }, + items: []reconcile.Request{request}, + }})) + }) + + It("should use the priority from Result when there was an error", func(ctx SpecContext) { + q := &fakePriorityQueue{PriorityQueue: priorityqueue.New[reconcile.Request]("controller1")} + ctrl.NewQueue = func(string, workqueue.TypedRateLimiter[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] { + return q + } + + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) + }() + + q.PriorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: ptr.To(10)}, request) + + By("Invoking Reconciler which will return an error") + fakeReconcile.AddResult(reconcile.Result{Priority: ptr.To(99)}, errors.New("oups, I did it again")) + Expect(<-reconciled).To(Equal(request)) + Eventually(func() []priorityQueueAddition { + q.lock.Lock() + defer q.lock.Unlock() + return q.added + }).Should(Equal([]priorityQueueAddition{{ + AddOpts: priorityqueue.AddOpts{ + RateLimited: true, + Priority: ptr.To(99), + }, + items: []reconcile.Request{request}, + }})) }) PIt("should return if the queue is shutdown", func() { @@ -453,7 +969,7 @@ var _ = Describe("controller", func() { reconcileTotal.Reset() }) - It("should get updated on successful reconciliation", func(done Done) { + It("should get updated on successful reconciliation", func(ctx 
SpecContext) { Expect(func() error { Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "success").Write(&reconcileTotal)).To(Succeed()) if reconcileTotal.GetCounter().GetValue() != 0.0 { @@ -464,11 +980,12 @@ var _ = Describe("controller", func() { go func() { defer GinkgoRecover() - Expect(ctrl.Start(stop)).NotTo(HaveOccurred()) + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) }() By("Invoking Reconciler which will succeed") - ctrl.Queue.Add(request) + queue.Add(request) + fakeReconcile.AddResult(reconcile.Result{}, nil) Expect(<-reconciled).To(Equal(request)) Eventually(func() error { Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "success").Write(&reconcileTotal)).To(Succeed()) @@ -477,11 +994,9 @@ var _ = Describe("controller", func() { } return nil }, 2.0).Should(Succeed()) + }) - close(done) - }, 2.0) - - It("should get updated on reconcile errors", func(done Done) { + It("should get updated on reconcile errors", func(ctx SpecContext) { Expect(func() error { Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "error").Write(&reconcileTotal)).To(Succeed()) if reconcileTotal.GetCounter().GetValue() != 0.0 { @@ -490,14 +1005,14 @@ var _ = Describe("controller", func() { return nil }()).Should(Succeed()) - fakeReconcile.Err = fmt.Errorf("expected error: reconcile") go func() { defer GinkgoRecover() - Expect(ctrl.Start(stop)).NotTo(HaveOccurred()) + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) }() By("Invoking Reconciler which will give an error") - ctrl.Queue.Add(request) + queue.Add(request) + fakeReconcile.AddResult(reconcile.Result{}, fmt.Errorf("expected error: reconcile")) Expect(<-reconciled).To(Equal(request)) Eventually(func() error { Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "error").Write(&reconcileTotal)).To(Succeed()) @@ -506,11 +1021,9 @@ var _ = Describe("controller", func() { } return nil }, 2.0).Should(Succeed()) + }) - close(done) - }, 2.0) - - It("should get updated when reconcile returns with 
retry enabled", func(done Done) { + It("should get updated when reconcile returns with retry enabled", func(ctx SpecContext) { Expect(func() error { Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "retry").Write(&reconcileTotal)).To(Succeed()) if reconcileTotal.GetCounter().GetValue() != 0.0 { @@ -519,14 +1032,15 @@ var _ = Describe("controller", func() { return nil }()).Should(Succeed()) - fakeReconcile.Result.Requeue = true go func() { defer GinkgoRecover() - Expect(ctrl.Start(stop)).NotTo(HaveOccurred()) + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) }() + By("Invoking Reconciler which will return result with Requeue enabled") - ctrl.Queue.Add(request) + queue.Add(request) + fakeReconcile.AddResult(reconcile.Result{Requeue: true}, nil) Expect(<-reconciled).To(Equal(request)) Eventually(func() error { Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "requeue").Write(&reconcileTotal)).To(Succeed()) @@ -535,11 +1049,9 @@ var _ = Describe("controller", func() { } return nil }, 2.0).Should(Succeed()) + }) - close(done) - }, 2.0) - - It("should get updated when reconcile returns with retryAfter enabled", func(done Done) { + It("should get updated when reconcile returns with retryAfter enabled", func(ctx SpecContext) { Expect(func() error { Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "retry_after").Write(&reconcileTotal)).To(Succeed()) if reconcileTotal.GetCounter().GetValue() != 0.0 { @@ -548,14 +1060,14 @@ var _ = Describe("controller", func() { return nil }()).Should(Succeed()) - fakeReconcile.Result.RequeueAfter = 5 * time.Hour go func() { defer GinkgoRecover() - Expect(ctrl.Start(stop)).NotTo(HaveOccurred()) + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) }() By("Invoking Reconciler which will return result with requeueAfter enabled") - ctrl.Queue.Add(request) + queue.Add(request) + fakeReconcile.AddResult(reconcile.Result{RequeueAfter: 5 * time.Hour}, nil) Expect(<-reconciled).To(Equal(request)) Eventually(func() error { 
Expect(ctrlmetrics.ReconcileTotal.WithLabelValues(ctrl.Name, "requeue_after").Write(&reconcileTotal)).To(Succeed()) @@ -564,13 +1076,11 @@ var _ = Describe("controller", func() { } return nil }, 2.0).Should(Succeed()) - - close(done) - }, 2.0) + }) }) Context("should update prometheus metrics", func() { - It("should requeue a Request if there is an error and continue processing items", func(done Done) { + It("should requeue a Request if there is an error and continue processing items", func(ctx SpecContext) { var reconcileErrs dto.Metric ctrlmetrics.ReconcileErrors.Reset() Expect(func() error { @@ -581,17 +1091,14 @@ var _ = Describe("controller", func() { return nil }()).Should(Succeed()) - fakeReconcile.Err = fmt.Errorf("expected error: reconcile") go func() { defer GinkgoRecover() - Expect(ctrl.Start(stop)).NotTo(HaveOccurred()) + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) }() - ctrl.Queue.Add(request) - - // Reduce the jitterperiod so we don't have to wait a second before the reconcile function is rerun. 
- ctrl.JitterPeriod = time.Millisecond + queue.Add(request) By("Invoking Reconciler which will give an error") + fakeReconcile.AddResult(reconcile.Result{}, fmt.Errorf("expected error: reconcile")) Expect(<-reconciled).To(Equal(request)) Eventually(func() error { Expect(ctrlmetrics.ReconcileErrors.WithLabelValues(ctrl.Name).Write(&reconcileErrs)).To(Succeed()) @@ -602,17 +1109,15 @@ var _ = Describe("controller", func() { }, 2.0).Should(Succeed()) By("Invoking Reconciler a second time without error") - fakeReconcile.Err = nil + fakeReconcile.AddResult(reconcile.Result{}, nil) Expect(<-reconciled).To(Equal(request)) By("Removing the item from the queue") - Eventually(ctrl.Queue.Len).Should(Equal(0)) - Eventually(func() int { return ctrl.Queue.NumRequeues(request) }).Should(Equal(0)) - - close(done) - }, 2.0) + Eventually(queue.Len).Should(Equal(0)) + Eventually(func() int { return queue.NumRequeues(request) }).Should(Equal(0)) + }) - It("should add a reconcile time to the reconcile time histogram", func(done Done) { + It("should add a reconcile time to the reconcile time histogram", func(ctx SpecContext) { var reconcileTime dto.Metric ctrlmetrics.ReconcileTime.Reset() @@ -628,16 +1133,17 @@ var _ = Describe("controller", func() { go func() { defer GinkgoRecover() - Expect(ctrl.Start(stop)).NotTo(HaveOccurred()) + Expect(ctrl.Start(ctx)).NotTo(HaveOccurred()) }() - ctrl.Queue.Add(request) + queue.Add(request) By("Invoking Reconciler") + fakeReconcile.AddResult(reconcile.Result{}, nil) Expect(<-reconciled).To(Equal(request)) By("Removing the item from the queue") - Eventually(ctrl.Queue.Len).Should(Equal(0)) - Eventually(func() int { return ctrl.Queue.NumRequeues(request) }).Should(Equal(0)) + Eventually(queue.Len).Should(Equal(0)) + Eventually(func() int { return queue.NumRequeues(request) }).Should(Equal(0)) Eventually(func() error { histObserver := ctrlmetrics.ReconcileTime.WithLabelValues(ctrl.Name) @@ -648,37 +1154,703 @@ var _ = Describe("controller", func() { } 
return nil }, 2.0).Should(Succeed()) + }) + }) + }) + + Describe("Warmup", func() { + JustBeforeEach(func() { + ctrl.EnableWarmup = ptr.To(true) + }) + + It("should track warmup status correctly with successful sync", func(ctx SpecContext) { + // Setup controller with sources that complete successfully + ctrl.CacheSyncTimeout = time.Second + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + return nil + }), + } + + Expect(ctrl.Warmup(ctx)).To(Succeed()) + }) + + It("should return an error if there is an error waiting for the informers", func(ctx SpecContext) { + ctrl.CacheSyncTimeout = time.Second + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Kind(&informertest.FakeInformers{Synced: ptr.To(false)}, &corev1.Pod{}, &handler.TypedEnqueueRequestForObject[*corev1.Pod]{}), + } + ctrl.Name = testControllerName + err := ctrl.Warmup(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to wait for testcontroller caches to sync")) + }) + + It("should error when cache sync timeout occurs", func(ctx SpecContext) { + c, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + c = &cacheWithIndefinitelyBlockingGetInformer{c} + + ctrl.CacheSyncTimeout = time.Second + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Kind(c, &appsv1.Deployment{}, &handler.TypedEnqueueRequestForObject[*appsv1.Deployment]{}), + } + ctrl.Name = testControllerName + + err = ctrl.Warmup(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to wait for testcontroller caches to sync kind source: *v1.Deployment: timed out waiting for cache to be synced")) + }) + + It("should not error when controller Warmup context is cancelled during Sources WaitForSync", func(specCtx SpecContext) { + ctrl.CacheSyncTimeout = 1 * time.Second + + sourceSynced := make(chan 
struct{}) + c, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + c = &cacheWithIndefinitelyBlockingGetInformer{c} + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + &singnallingSourceWrapper{ + SyncingSource: source.Kind[client.Object](c, &appsv1.Deployment{}, &handler.EnqueueRequestForObject{}), + cacheSyncDone: sourceSynced, + }, + } + ctrl.Name = testControllerName + + ctx, cancel := context.WithCancel(specCtx) + go func() { + defer GinkgoRecover() + err = ctrl.Warmup(ctx) + Expect(err).To(Succeed()) + }() + + cancel() + <-sourceSynced + }) + + It("should error when Warmup() is blocking forever", func(specCtx SpecContext) { + ctrl.CacheSyncTimeout = time.Second + + controllerDone := make(chan struct{}) + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + <-controllerDone + return ctx.Err() + })} + + ctx, cancel := context.WithTimeout(specCtx, 10*time.Second) + defer cancel() + + err := ctrl.Warmup(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("Please ensure that its Start() method is non-blocking")) + + close(controllerDone) + }) + + It("should not error when cache sync timeout is of sufficiently high", func(ctx SpecContext) { + ctrl.CacheSyncTimeout = 10 * time.Second + + sourceSynced := make(chan struct{}) + c := &informertest.FakeInformers{} + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + &singnallingSourceWrapper{ + SyncingSource: source.Kind[client.Object](c, &appsv1.Deployment{}, &handler.EnqueueRequestForObject{}), + cacheSyncDone: sourceSynced, + }, + } + + go func() { + defer GinkgoRecover() + Expect(ctrl.Warmup(ctx)).To(Succeed()) + }() + + <-sourceSynced + }) + + It("should process events from source.Channel", func(ctx SpecContext) { + ctrl.CacheSyncTimeout = 10 * time.Second + // channel to be closed when event is processed + processed := 
make(chan struct{}) + // source channel + ch := make(chan event.GenericEvent, 1) + + // event to be sent to the channel + p := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, + } + evt := event.GenericEvent{ + Object: p, + } + + ins := source.Channel( + ch, + handler.Funcs{ + GenericFunc: func(ctx context.Context, evt event.GenericEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + close(processed) + }, + }, + ) + + // send the event to the channel + ch <- evt + + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ins} + + go func() { + defer GinkgoRecover() + Expect(ctrl.Warmup(ctx)).To(Succeed()) + }() + <-processed + }) + + It("should error when channel source is not specified", func(ctx SpecContext) { + ctrl.CacheSyncTimeout = 10 * time.Second + + ins := source.Channel[string](nil, nil) + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ins} + + e := ctrl.Warmup(ctx) + Expect(e).To(HaveOccurred()) + Expect(e.Error()).To(ContainSubstring("must specify Channel.Source")) + }) + + It("should call Start on sources with the appropriate EventHandler, Queue, and Predicates", func(specCtx SpecContext) { + ctrl.CacheSyncTimeout = 10 * time.Second + started := false + ctx, cancel := context.WithCancel(specCtx) + src := source.Func(func(ctx context.Context, q workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + defer GinkgoRecover() + Expect(q).To(Equal(ctrl.Queue)) + + started = true + cancel() // Cancel the context so ctrl.Warmup() doesn't block forever + return nil + }) + Expect(ctrl.Watch(src)).NotTo(HaveOccurred()) + + err := ctrl.Warmup(ctx) + Expect(err).To(Succeed()) + Expect(started).To(BeTrue()) + }) + + It("should return an error if there is an error starting sources", func(ctx SpecContext) { + ctrl.CacheSyncTimeout = 10 * time.Second + err := fmt.Errorf("Expected Error: could not start source") + src := source.Func(func(context.Context, + 
workqueue.TypedRateLimitingInterface[reconcile.Request], + ) error { + defer GinkgoRecover() + return err + }) + Expect(ctrl.Watch(src)).To(Succeed()) + + Expect(ctrl.Warmup(ctx)).To(Equal(err)) + }) + + It("should track warmup status correctly with unsuccessful sync", func(ctx SpecContext) { + // Setup controller with sources that complete with error + ctrl.CacheSyncTimeout = time.Second + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + return errors.New("sync error") + }), + } + + err := ctrl.Warmup(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("sync error")) + }) + + It("should call Start on sources with the appropriate non-nil queue", func(specCtx SpecContext) { + ctrl.CacheSyncTimeout = 10 * time.Second + started := false + ctx, cancel := context.WithCancel(specCtx) + src := source.Func(func(ctx context.Context, q workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + defer GinkgoRecover() + Expect(q).ToNot(BeNil()) + Expect(q).To(Equal(ctrl.Queue)) + + started = true + cancel() // Cancel the context so ctrl.Start() doesn't block forever + return nil + }) + Expect(ctrl.Watch(src)).To(Succeed()) + Expect(ctrl.Warmup(ctx)).To(Succeed()) + Expect(ctrl.Queue).ToNot(BeNil()) + Expect(started).To(BeTrue()) + }) + + It("should return true if context is cancelled while waiting for source to start", func(specCtx SpecContext) { + // Setup controller with sources that complete with error + ctx, cancel := context.WithCancel(specCtx) + defer cancel() + + ctrl.CacheSyncTimeout = time.Second + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + <-ctx.Done() + return nil + }), + } + + // channel to prevent the goroutine from outliving the It test + waitChan := make(chan struct{}) + + // Invoked 
in a goroutine because Warmup will block + go func() { + defer GinkgoRecover() + defer close(waitChan) + Expect(ctrl.Warmup(ctx)).To(Succeed()) + }() + + cancel() + <-waitChan + }) + + It("should be called before leader election runnables if warmup is enabled", func(specCtx SpecContext) { + // This unit test exists to ensure that a warmup enabled controller will actually be + // called in the warmup phase before the leader election runnables are started. It + // catches regressions in the controller that would not implement warmupRunnable from + // pkg/manager. + ctx, cancel := context.WithCancel(specCtx) + + By("Creating a channel to track execution order") + runnableExecutionOrderChan := make(chan string, 2) + const nonWarmupRunnableName = "nonWarmupRunnable" + const warmupRunnableName = "warmupRunnable" + + ctrl.CacheSyncTimeout = time.Second + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + runnableExecutionOrderChan <- warmupRunnableName + return nil + }), + } + + nonWarmupCtrl := New[reconcile.Request](Options[reconcile.Request]{ + MaxConcurrentReconciles: 1, + Do: fakeReconcile, + NewQueue: func(string, workqueue.TypedRateLimiter[reconcile.Request]) workqueue.TypedRateLimitingInterface[reconcile.Request] { + return queue + }, + LogConstructor: func(_ *reconcile.Request) logr.Logger { + return log.RuntimeLog.WithName("controller").WithName("test") + }, + CacheSyncTimeout: time.Second, + EnableWarmup: ptr.To(false), + LeaderElected: ptr.To(true), + }) + nonWarmupCtrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + runnableExecutionOrderChan <- nonWarmupRunnableName + return nil + }), + } + + By("Creating a test resource lock with hooks") + resourceLock, err := fakeleaderelection.NewResourceLock(nil, nil, 
leaderelection.Options{}) + Expect(err).ToNot(HaveOccurred()) + + By("Creating a manager") + testenv = &envtest.Environment{} + cfg, err := testenv.Start() + Expect(err).NotTo(HaveOccurred()) + m, err := manager.New(cfg, manager.Options{ + LeaderElection: true, + LeaderElectionID: "some-leader-election-id", + LeaderElectionNamespace: "default", + LeaderElectionResourceLockInterface: resourceLock, + }) + Expect(err).NotTo(HaveOccurred()) + + By("Adding warmup and non-warmup controllers to the manager") + Expect(m.Add(ctrl)).To(Succeed()) + Expect(m.Add(nonWarmupCtrl)).To(Succeed()) + + By("Blocking leader election") + resourceLockWithHooks, ok := resourceLock.(fakeleaderelection.ControllableResourceLockInterface) + Expect(ok).To(BeTrue(), "resource lock should implement ResourceLockInterfaceWithHooks") + resourceLockWithHooks.BlockLeaderElection() + + By("Starting the manager") + waitChan := make(chan struct{}) + go func() { + defer GinkgoRecover() + defer close(waitChan) + Expect(m.Start(ctx)).To(Succeed()) + }() + Expect(<-runnableExecutionOrderChan).To(Equal(warmupRunnableName)) + + By("Unblocking leader election") + resourceLockWithHooks.UnblockLeaderElection() + <-m.Elected() + Expect(<-runnableExecutionOrderChan).To(Equal(nonWarmupRunnableName)) + + cancel() + <-waitChan + }) + + It("should not cause a data race when called concurrently", func(ctx SpecContext) { + + ctrl.CacheSyncTimeout = time.Second + + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + return nil + }), + } + + var wg sync.WaitGroup + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + defer GinkgoRecover() + defer wg.Done() + Expect(ctrl.Warmup(ctx)).To(Succeed()) + }() + } + + wg.Wait() + }) + + It("should not cause a data race when called concurrently with Start and only start sources once", func(specCtx SpecContext) { + ctx, cancel := context.WithCancel(specCtx) + + 
ctrl.CacheSyncTimeout = time.Second + numWatches := 10 + + var watchStartedCount atomic.Int32 + for range numWatches { + ctrl.startWatches = append(ctrl.startWatches, source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + watchStartedCount.Add(1) + return nil + })) + } + + By("calling Warmup and Start concurrently") + blockOnStartChan := make(chan struct{}) + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).To(Succeed()) + close(blockOnStartChan) + }() + + blockOnWarmupChan := make(chan struct{}) + go func() { + defer GinkgoRecover() + Expect(ctrl.Warmup(ctx)).To(Succeed()) + close(blockOnWarmupChan) + }() + + <-blockOnWarmupChan + + cancel() + + <-blockOnStartChan + + Expect(watchStartedCount.Load()).To(Equal(int32(numWatches)), "source should only be started once") + Expect(ctrl.startWatches).To(BeNil(), "startWatches should be reset to nil after they are started") + }) + + It("should start sources added after Warmup is called", func(specCtx SpecContext) { + ctx, cancel := context.WithCancel(specCtx) + + ctrl.CacheSyncTimeout = time.Second + + Expect(ctrl.Warmup(ctx)).To(Succeed()) + + By("starting a watch after warmup is added") + var didWatchStart atomic.Bool + Expect(ctrl.Watch(source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + didWatchStart.Store(true) + return nil + }))).To(Succeed()) + + waitChan := make(chan struct{}) + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).To(Succeed()) + close(waitChan) + }() + + Eventually(didWatchStart.Load).Should(BeTrue(), "watch should be started if it is added after Warmup") + + cancel() + <-waitChan + }) + + DescribeTable("should not leak goroutines when manager is stopped with warmup runnable", + func(specContext SpecContext, leaderElection bool) { + ctx, cancel := context.WithCancel(specContext) + defer cancel() + + ctrl.CacheSyncTimeout = time.Second + + By("Creating a manager") + 
testenv = &envtest.Environment{} + cfg, err := testenv.Start() + Expect(err).NotTo(HaveOccurred()) + m, err := manager.New(cfg, manager.Options{ + LeaderElection: leaderElection, + LeaderElectionID: "some-leader-election-id", + LeaderElectionNamespace: "default", + }) + Expect(err).NotTo(HaveOccurred()) + + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + <-ctx.Done() + return nil + }), + } + Expect(m.Add(ctrl)).To(Succeed()) + + // ignore needs to go after the testenv.Start() call to ignore the apiserver + // process + currentGRs := goleak.IgnoreCurrent() + waitChan := make(chan struct{}) + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).To(Succeed()) + close(waitChan) + }() + + <-m.Elected() + By("stopping the manager via context") + cancel() + + Eventually(func() error { return goleak.Find(currentGRs) }).Should(Succeed()) + <-waitChan + }, + Entry("and with leader election enabled", true), + Entry("and without leader election enabled", false), + ) + }) + + Describe("Warmup with warmup disabled", func() { + JustBeforeEach(func() { + ctrl.EnableWarmup = ptr.To(false) + }) + + It("should not start sources when Warmup is called if warmup is disabled but start it when Start is called.", func(specCtx SpecContext) { + // Setup controller with sources that complete successfully + ctx, cancel := context.WithCancel(specCtx) - close(done) - }, 4.0) + ctrl.CacheSyncTimeout = time.Second + var isSourceStarted atomic.Bool + isSourceStarted.Store(false) + ctrl.startWatches = []source.TypedSource[reconcile.Request]{ + source.Func(func(ctx context.Context, _ workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + isSourceStarted.Store(true) + return nil + }), + } + + By("Calling Warmup when EnableWarmup is false") + err := ctrl.Warmup(ctx) + Expect(err).NotTo(HaveOccurred()) + Expect(isSourceStarted.Load()).To(BeFalse()) + + By("Calling 
Start when EnableWarmup is false") + waitChan := make(chan struct{}) + + go func() { + defer GinkgoRecover() + Expect(ctrl.Start(ctx)).To(Succeed()) + close(waitChan) + }() + Eventually(isSourceStarted.Load).Should(BeTrue()) + cancel() + <-waitChan }) }) }) +var _ = Describe("ReconcileIDFromContext function", func() { + It("should return an empty string if there is nothing in the context", func(ctx SpecContext) { + reconcileID := ReconcileIDFromContext(ctx) + + Expect(reconcileID).To(Equal(types.UID(""))) + }) + + It("should return the correct reconcileID from context", func(specContext SpecContext) { + const expectedReconcileID = types.UID("uuid") + ctx := addReconcileID(specContext, expectedReconcileID) + reconcileID := ReconcileIDFromContext(ctx) + + Expect(reconcileID).To(Equal(expectedReconcileID)) + }) +}) + type DelegatingQueue struct { - workqueue.RateLimitingInterface + workqueue.TypedRateLimitingInterface[reconcile.Request] + mu sync.Mutex countAddRateLimited int countAdd int countAddAfter int } -func (q *DelegatingQueue) AddRateLimited(item interface{}) { +func (q *DelegatingQueue) AddRateLimited(item reconcile.Request) { + q.mu.Lock() + defer q.mu.Unlock() + q.countAddRateLimited++ - q.RateLimitingInterface.AddRateLimited(item) + q.TypedRateLimitingInterface.AddRateLimited(item) } -func (q *DelegatingQueue) AddAfter(item interface{}, d time.Duration) { +func (q *DelegatingQueue) AddAfter(item reconcile.Request, d time.Duration) { + q.mu.Lock() + defer q.mu.Unlock() + q.countAddAfter++ - q.RateLimitingInterface.AddAfter(item, d) + q.TypedRateLimitingInterface.AddAfter(item, d) } -func (q *DelegatingQueue) Add(item interface{}) { +func (q *DelegatingQueue) Add(item reconcile.Request) { + q.mu.Lock() + defer q.mu.Unlock() q.countAdd++ - q.RateLimitingInterface.Add(item) + + q.TypedRateLimitingInterface.Add(item) } -func (q *DelegatingQueue) Forget(item interface{}) { +func (q *DelegatingQueue) Forget(item reconcile.Request) { + q.mu.Lock() + defer 
q.mu.Unlock() q.countAdd-- - q.RateLimitingInterface.Forget(item) + + q.TypedRateLimitingInterface.Forget(item) +} + +type countInfo struct { + Trying, AddAfter, AddRateLimited int +} + +func (q *DelegatingQueue) getCounts() countInfo { + q.mu.Lock() + defer q.mu.Unlock() + + return countInfo{ + Trying: q.countAdd, + AddAfter: q.countAddAfter, + AddRateLimited: q.countAddRateLimited, + } +} + +type fakeReconcileResultPair struct { + Result reconcile.Result + Err error +} + +type fakeReconciler struct { + Requests chan reconcile.Request + results chan fakeReconcileResultPair +} + +func (f *fakeReconciler) AddResult(res reconcile.Result, err error) { + f.results <- fakeReconcileResultPair{Result: res, Err: err} +} + +func (f *fakeReconciler) Reconcile(_ context.Context, r reconcile.Request) (reconcile.Result, error) { + res := <-f.results + if f.Requests != nil { + f.Requests <- r + } + return res.Result, res.Err +} + +type singnallingSourceWrapper struct { + cacheSyncDone chan struct{} + source.SyncingSource +} + +func (s *singnallingSourceWrapper) Start(ctx context.Context, q workqueue.TypedRateLimitingInterface[reconcile.Request]) error { + err := s.SyncingSource.Start(ctx, q) + if err != nil { + // WaitForSync will never be called if this errors, so close the channel to prevent deadlocks in tests + close(s.cacheSyncDone) + } + return err +} + +func (s *singnallingSourceWrapper) WaitForSync(ctx context.Context) error { + defer func() { + close(s.cacheSyncDone) + }() + return s.SyncingSource.WaitForSync(ctx) +} + +var _ cache.Cache = &cacheWithIndefinitelyBlockingGetInformer{} + +// cacheWithIndefinitelyBlockingGetInformer has a GetInformer implementation that blocks indefinitely or until its +// context is cancelled. +// We need it as a workaround for testenvs lack of support for a secure apiserver, because the insecure port always +// implies the allow all authorizer, so we can not simulate rbac issues with it. 
They are the usual cause of the real +// caches GetInformer blocking showing this behavior. +// TODO: Remove this once envtest supports a secure apiserver. +type cacheWithIndefinitelyBlockingGetInformer struct { + cache.Cache +} + +func (c *cacheWithIndefinitelyBlockingGetInformer) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) { + <-ctx.Done() + return nil, errors.New("GetInformer timed out") +} + +type bisignallingSource[T comparable] struct { + // receives the queue that is passed to Start + startCall chan workqueue.TypedRateLimitingInterface[T] + // passes an error to return from Start + startDone chan error + // closed when WaitForSync is called + waitCall chan struct{} + // passes an error to return from WaitForSync + waitDone chan error +} + +var _ source.TypedSyncingSource[int] = (*bisignallingSource[int])(nil) + +func (t *bisignallingSource[T]) Start(ctx context.Context, q workqueue.TypedRateLimitingInterface[T]) error { + select { + case t.startCall <- q: + case <-ctx.Done(): + return ctx.Err() + } + select { + case err := <-t.startDone: + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +func (t *bisignallingSource[T]) WaitForSync(ctx context.Context) error { + close(t.waitCall) + select { + case err := <-t.waitDone: + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +type priorityQueueAddition struct { + priorityqueue.AddOpts + items []reconcile.Request +} + +type fakePriorityQueue struct { + priorityqueue.PriorityQueue[reconcile.Request] + + lock sync.Mutex + added []priorityQueueAddition +} + +func (f *fakePriorityQueue) AddWithOpts(o priorityqueue.AddOpts, items ...reconcile.Request) { + f.lock.Lock() + defer f.lock.Unlock() + f.added = append(f.added, priorityQueueAddition{AddOpts: o, items: items}) } diff --git a/pkg/internal/controller/metrics/metrics.go b/pkg/internal/controller/metrics/metrics.go index 53c0bb332c..450e9ae25b 100644 --- 
a/pkg/internal/controller/metrics/metrics.go +++ b/pkg/internal/controller/metrics/metrics.go @@ -17,7 +17,10 @@ limitations under the License. package metrics import ( + "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" "sigs.k8s.io/controller-runtime/pkg/metrics" ) @@ -25,24 +28,57 @@ var ( // ReconcileTotal is a prometheus counter metrics which holds the total // number of reconciliations per controller. It has two labels. controller label refers // to the controller name and result label refers to the reconcile result i.e - // success, error, requeue, requeue_after + // success, error, requeue, requeue_after. ReconcileTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "controller_runtime_reconcile_total", Help: "Total number of reconciliations per controller", }, []string{"controller", "result"}) // ReconcileErrors is a prometheus counter metrics which holds the total - // number of errors from the Reconciler + // number of errors from the Reconciler. ReconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "controller_runtime_reconcile_errors_total", Help: "Total number of reconciliation errors per controller", }, []string{"controller"}) + // TerminalReconcileErrors is a prometheus counter metrics which holds the total + // number of terminal errors from the Reconciler. + TerminalReconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_terminal_reconcile_errors_total", + Help: "Total number of terminal reconciliation errors per controller", + }, []string{"controller"}) + + // ReconcilePanics is a prometheus counter metrics which holds the total + // number of panics from the Reconciler. 
+ ReconcilePanics = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_reconcile_panics_total", + Help: "Total number of reconciliation panics per controller", + }, []string{"controller"}) + // ReconcileTime is a prometheus metric which keeps track of the duration - // of reconciliations + // of reconciliations. ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Name: "controller_runtime_reconcile_time_seconds", Help: "Length of time per reconciliation per controller", + Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, + 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60}, + NativeHistogramBucketFactor: 1.1, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: 1 * time.Hour, + }, []string{"controller"}) + + // WorkerCount is a prometheus metric which holds the number of + // concurrent reconciles per controller. + WorkerCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "controller_runtime_max_concurrent_reconciles", + Help: "Maximum number of concurrent reconciles per controller", + }, []string{"controller"}) + + // ActiveWorkers is a prometheus metric which holds the number + // of active workers per controller. + ActiveWorkers = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "controller_runtime_active_workers", + Help: "Number of currently used workers per controller", }, []string{"controller"}) ) @@ -50,10 +86,14 @@ func init() { metrics.Registry.MustRegister( ReconcileTotal, ReconcileErrors, + TerminalReconcileErrors, + ReconcilePanics, ReconcileTime, + WorkerCount, + ActiveWorkers, // expose process metrics like CPU, Memory, file descriptor usage etc. - prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), - // expose Go runtime metrics like GC stats, memory stats etc. 
- prometheus.NewGoCollector(), + collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), + // expose all Go runtime metrics like GC stats, memory stats etc. + collectors.NewGoCollector(collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll)), ) } diff --git a/pkg/internal/field/selector/utils.go b/pkg/internal/field/selector/utils.go new file mode 100644 index 0000000000..8f6dc71ede --- /dev/null +++ b/pkg/internal/field/selector/utils.go @@ -0,0 +1,37 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selector + +import ( + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/selection" +) + +// RequiresExactMatch checks if the given field selector is of the form `k=v` or `k==v`. 
+func RequiresExactMatch(sel fields.Selector) bool { + reqs := sel.Requirements() + if len(reqs) == 0 { + return false + } + + for _, req := range reqs { + if req.Operator != selection.Equals && req.Operator != selection.DoubleEquals { + return false + } + } + return true +} diff --git a/examples/conversion/pkg/apis/addtoscheme_jobs_v1.go b/pkg/internal/field/selector/utils_suite_test.go similarity index 67% rename from examples/conversion/pkg/apis/addtoscheme_jobs_v1.go rename to pkg/internal/field/selector/utils_suite_test.go index eca32666cc..dd42f1d1ac 100644 --- a/examples/conversion/pkg/apis/addtoscheme_jobs_v1.go +++ b/pkg/internal/field/selector/utils_suite_test.go @@ -1,4 +1,5 @@ /* +Copyright 2022 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,13 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package apis +package selector_test import ( - "sigs.k8s.io/controller-runtime/examples/conversion/pkg/apis/jobs/v1" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" ) -func init() { - // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back - AddToSchemes = append(AddToSchemes, v1.SchemeBuilder.AddToScheme) +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Fields Selector Utils Suite") } diff --git a/pkg/internal/field/selector/utils_test.go b/pkg/internal/field/selector/utils_test.go new file mode 100644 index 0000000000..a48bbf4e5a --- /dev/null +++ b/pkg/internal/field/selector/utils_test.go @@ -0,0 +1,58 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selector_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/fields" + + . "sigs.k8s.io/controller-runtime/pkg/internal/field/selector" +) + +var _ = Describe("RequiresExactMatch function", func() { + + It("Returns false when the selector matches everything", func() { + requiresExactMatch := RequiresExactMatch(fields.Everything()) + Expect(requiresExactMatch).To(BeFalse()) + }) + + It("Returns false when the selector matches nothing", func() { + requiresExactMatch := RequiresExactMatch(fields.Nothing()) + Expect(requiresExactMatch).To(BeFalse()) + }) + + It("Returns false when the selector has the form key!=val", func() { + requiresExactMatch := RequiresExactMatch(fields.ParseSelectorOrDie("key!=val")) + Expect(requiresExactMatch).To(BeFalse()) + }) + + It("Returns true when the selector has the form key1==val1,key2==val2", func() { + requiresExactMatch := RequiresExactMatch(fields.ParseSelectorOrDie("key1==val1,key2==val2")) + Expect(requiresExactMatch).To(BeTrue()) + }) + + It("Returns true when the selector has the form key==val", func() { + requiresExactMatch := RequiresExactMatch(fields.ParseSelectorOrDie("key==val")) + Expect(requiresExactMatch).To(BeTrue()) + }) + + It("Returns true when the selector has the form key=val", func() { + requiresExactMatch := RequiresExactMatch(fields.ParseSelectorOrDie("key=val")) + Expect(requiresExactMatch).To(BeTrue()) + }) +}) diff --git a/pkg/runtime/scheme/scheme.go b/pkg/internal/flock/doc.go similarity index 67% rename from 
pkg/runtime/scheme/scheme.go rename to pkg/internal/flock/doc.go index 889308c1e0..11e39823ed 100644 --- a/pkg/runtime/scheme/scheme.go +++ b/pkg/internal/flock/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,9 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package scheme contains utilities for gradually building Schemes, -// which contain information associating Go types with Kubernetes -// groups, versions, and kinds. +// Package flock is copied from k8s.io/kubernetes/pkg/util/flock to avoid +// importing k8s.io/kubernetes as a dependency. // -// Deprecated: use pkg/scheme instead. -package scheme +// Provides file locking functionalities on unix systems. +package flock diff --git a/pkg/internal/flock/errors.go b/pkg/internal/flock/errors.go new file mode 100644 index 0000000000..ee7a434372 --- /dev/null +++ b/pkg/internal/flock/errors.go @@ -0,0 +1,24 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flock + +import "errors" + +var ( + // ErrAlreadyLocked is returned when the file is already locked. 
+ ErrAlreadyLocked = errors.New("the file is already locked") +) diff --git a/pkg/internal/flock/flock_other.go b/pkg/internal/flock/flock_other.go new file mode 100644 index 0000000000..069a5b3a2c --- /dev/null +++ b/pkg/internal/flock/flock_other.go @@ -0,0 +1,24 @@ +// +build !linux,!darwin,!freebsd,!openbsd,!netbsd,!dragonfly + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flock + +// Acquire is not implemented on non-unix systems. +func Acquire(path string) error { + return nil +} diff --git a/pkg/internal/flock/flock_unix.go b/pkg/internal/flock/flock_unix.go new file mode 100644 index 0000000000..71ec576df2 --- /dev/null +++ b/pkg/internal/flock/flock_unix.go @@ -0,0 +1,48 @@ +//go:build linux || darwin || freebsd || openbsd || netbsd || dragonfly +// +build linux darwin freebsd openbsd netbsd dragonfly + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package flock + +import ( + "errors" + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +// Acquire acquires a lock on a file for the duration of the process. This method +// is reentrant. +func Acquire(path string) error { + fd, err := unix.Open(path, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0600) + if err != nil { + if errors.Is(err, os.ErrExist) { + return fmt.Errorf("cannot lock file %q: %w", path, ErrAlreadyLocked) + } + return err + } + + // We don't need to close the fd since we should hold + // it until the process exits. + err = unix.Flock(fd, unix.LOCK_NB|unix.LOCK_EX) + if errors.Is(err, unix.EWOULDBLOCK) { // This condition requires LOCK_NB. + return fmt.Errorf("cannot lock file %q: %w", path, ErrAlreadyLocked) + } + return err +} diff --git a/pkg/internal/httpserver/server.go b/pkg/internal/httpserver/server.go new file mode 100644 index 0000000000..b5f91f18e0 --- /dev/null +++ b/pkg/internal/httpserver/server.go @@ -0,0 +1,16 @@ +package httpserver + +import ( + "net/http" + "time" +) + +// New returns a new server with sane defaults. +func New(handler http.Handler) *http.Server { + return &http.Server{ + Handler: handler, + MaxHeaderBytes: 1 << 20, + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout + ReadHeaderTimeout: 32 * time.Second, + } +} diff --git a/pkg/internal/log/log.go b/pkg/internal/log/log.go index 0d671b73d6..d91a0ca50c 100644 --- a/pkg/internal/log/log.go +++ b/pkg/internal/log/log.go @@ -14,9 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package log contains utilities for fetching a new logger -// when one is not already available. -// Deprecated: use pkg/log package log import ( diff --git a/pkg/internal/metrics/workqueue.go b/pkg/internal/metrics/workqueue.go new file mode 100644 index 0000000000..402319817b --- /dev/null +++ b/pkg/internal/metrics/workqueue.go @@ -0,0 +1,170 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +// This file is copied and adapted from k8s.io/component-base/metrics/prometheus/workqueue +// which registers metrics to the k8s legacy Registry. We require very +// similar functionality, but must register metrics to a different Registry. + +// Metrics subsystem and all keys used by the workqueue. 
+const ( + WorkQueueSubsystem = metrics.WorkQueueSubsystem + DepthKey = metrics.DepthKey + AddsKey = metrics.AddsKey + QueueLatencyKey = metrics.QueueLatencyKey + WorkDurationKey = metrics.WorkDurationKey + UnfinishedWorkKey = metrics.UnfinishedWorkKey + LongestRunningProcessorKey = metrics.LongestRunningProcessorKey + RetriesKey = metrics.RetriesKey +) + +var ( + depth = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: DepthKey, + Help: "Current depth of workqueue by workqueue and priority", + }, []string{"name", "controller", "priority"}) + + adds = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: AddsKey, + Help: "Total number of adds handled by workqueue", + }, []string{"name", "controller"}) + + latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: QueueLatencyKey, + Help: "How long in seconds an item stays in workqueue before being requested", + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 12), + NativeHistogramBucketFactor: 1.1, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: 1 * time.Hour, + }, []string{"name", "controller"}) + + workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: WorkDurationKey, + Help: "How long in seconds processing an item from workqueue takes.", + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 12), + NativeHistogramBucketFactor: 1.1, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: 1 * time.Hour, + }, []string{"name", "controller"}) + + unfinished = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: UnfinishedWorkKey, + Help: "How many seconds of work has been done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. 
One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + }, []string{"name", "controller"}) + + longestRunningProcessor = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: LongestRunningProcessorKey, + Help: "How many seconds has the longest running " + + "processor for workqueue been running.", + }, []string{"name", "controller"}) + + retries = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: RetriesKey, + Help: "Total number of retries handled by workqueue", + }, []string{"name", "controller"}) +) + +func init() { + metrics.Registry.MustRegister(depth) + metrics.Registry.MustRegister(adds) + metrics.Registry.MustRegister(latency) + metrics.Registry.MustRegister(workDuration) + metrics.Registry.MustRegister(unfinished) + metrics.Registry.MustRegister(longestRunningProcessor) + metrics.Registry.MustRegister(retries) + + workqueue.SetProvider(WorkqueueMetricsProvider{}) +} + +type WorkqueueMetricsProvider struct{} + +func (WorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { + return depth.WithLabelValues(name, name, "") // no priority +} + +func (WorkqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { + return adds.WithLabelValues(name, name) +} + +func (WorkqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { + return latency.WithLabelValues(name, name) +} + +func (WorkqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { + return workDuration.WithLabelValues(name, name) +} + +func (WorkqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + return unfinished.WithLabelValues(name, name) +} + +func (WorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { + return longestRunningProcessor.WithLabelValues(name, name) +} + +func 
(WorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { + return retries.WithLabelValues(name, name) +} + +type MetricsProviderWithPriority interface { + workqueue.MetricsProvider + + NewDepthMetricWithPriority(name string) DepthMetricWithPriority +} + +// DepthMetricWithPriority represents a depth metric with priority. +type DepthMetricWithPriority interface { + Inc(priority int) + Dec(priority int) +} + +var _ MetricsProviderWithPriority = WorkqueueMetricsProvider{} + +func (WorkqueueMetricsProvider) NewDepthMetricWithPriority(name string) DepthMetricWithPriority { + return &depthWithPriorityMetric{lvs: []string{name, name}} +} + +type depthWithPriorityMetric struct { + lvs []string +} + +func (g *depthWithPriorityMetric) Inc(priority int) { + depth.WithLabelValues(append(g.lvs, strconv.Itoa(priority))...).Inc() +} + +func (g *depthWithPriorityMetric) Dec(priority int) { + depth.WithLabelValues(append(g.lvs, strconv.Itoa(priority))...).Dec() +} diff --git a/pkg/internal/objectutil/filter.go b/pkg/internal/objectutil/objectutil.go similarity index 99% rename from pkg/internal/objectutil/filter.go rename to pkg/internal/objectutil/objectutil.go index 8513846e2c..0189c04323 100644 --- a/pkg/internal/objectutil/filter.go +++ b/pkg/internal/objectutil/objectutil.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -// FilterWithLabels returns a copy of the items in objs matching labelSel +// FilterWithLabels returns a copy of the items in objs matching labelSel. func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { outItems := make([]runtime.Object, 0, len(objs)) for _, obj := range objs { diff --git a/pkg/internal/recorder/recorder.go b/pkg/internal/recorder/recorder.go index 18e150594c..21f0146ba3 100644 --- a/pkg/internal/recorder/recorder.go +++ b/pkg/internal/recorder/recorder.go @@ -17,45 +17,165 @@ limitations under the License. 
package recorder import ( + "context" "fmt" + "net/http" + "sync" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes" - typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/recorder" ) -type provider struct { +// EventBroadcasterProducer makes an event broadcaster, returning +// whether or not the broadcaster should be stopped with the Provider, +// or not (e.g. if it's shared, it shouldn't be stopped with the Provider). +type EventBroadcasterProducer func() (caster record.EventBroadcaster, stopWithProvider bool) + +// Provider is a recorder.Provider that records events to the k8s API server +// and to a logr Logger. +type Provider struct { + lock sync.RWMutex + stopped bool + // scheme to specify when creating a recorder scheme *runtime.Scheme - // eventBroadcaster to create new recorder instance - eventBroadcaster record.EventBroadcaster // logger is the logger to use when logging diagnostic event info - logger logr.Logger + logger logr.Logger + evtClient corev1client.EventInterface + makeBroadcaster EventBroadcasterProducer + + broadcasterOnce sync.Once + broadcaster record.EventBroadcaster + stopBroadcaster bool +} + +// NB(directxman12): this manually implements Stop instead of Being a runnable because we need to +// stop it *after* everything else shuts down, otherwise we'll cause panics as the leader election +// code finishes up and tries to continue emitting events. + +// Stop attempts to stop this provider, stopping the underlying broadcaster +// if the broadcaster asked to be stopped. 
It kinda tries to honor the given +// context, but the underlying broadcaster has an indefinite wait that doesn't +// return until all queued events are flushed, so this may end up just returning +// before the underlying wait has finished instead of cancelling the wait. +// This is Very Frustrating™. +func (p *Provider) Stop(shutdownCtx context.Context) { + doneCh := make(chan struct{}) + + go func() { + // technically, this could start the broadcaster, but practically, it's + // almost certainly already been started (e.g. by leader election). We + // need to invoke this to ensure that we don't inadvertently race with + // an invocation of getBroadcaster. + broadcaster := p.getBroadcaster() + if p.stopBroadcaster { + p.lock.Lock() + broadcaster.Shutdown() + p.stopped = true + p.lock.Unlock() + } + close(doneCh) + }() + + select { + case <-shutdownCtx.Done(): + case <-doneCh: + } +} + +// getBroadcaster ensures that a broadcaster is started for this +// provider, and returns it. It's threadsafe. +func (p *Provider) getBroadcaster() record.EventBroadcaster { + // NB(directxman12): this can technically still leak if something calls + // "getBroadcaster" (i.e. Emits an Event) but never calls Start, but if we + // create the broadcaster in start, we could race with other things that + // are started at the same time & want to emit events. The alternative is + // silently swallowing events and more locking, but that seems suboptimal. + + p.broadcasterOnce.Do(func() { + broadcaster, stop := p.makeBroadcaster() + broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: p.evtClient}) + broadcaster.StartEventWatcher( + func(e *corev1.Event) { + p.logger.V(1).Info(e.Message, "type", e.Type, "object", e.InvolvedObject, "reason", e.Reason) + }) + p.broadcaster = broadcaster + p.stopBroadcaster = stop + }) + + return p.broadcaster } // NewProvider create a new Provider instance. 
-func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger) (recorder.Provider, error) { - clientSet, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("failed to init clientSet: %v", err) +func NewProvider(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) { + if httpClient == nil { + panic("httpClient must not be nil") } - p := &provider{scheme: scheme, logger: logger} - p.eventBroadcaster = record.NewBroadcaster() - p.eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: clientSet.CoreV1().Events("")}) - p.eventBroadcaster.StartEventWatcher( - func(e *corev1.Event) { - p.logger.V(1).Info(e.Type, "object", e.InvolvedObject, "reason", e.Reason, "message", e.Message) - }) + corev1Client, err := corev1client.NewForConfigAndClient(config, httpClient) + if err != nil { + return nil, fmt.Errorf("failed to init client: %w", err) + } + p := &Provider{scheme: scheme, logger: logger, makeBroadcaster: makeBroadcaster, evtClient: corev1Client.Events("")} return p, nil } -func (p *provider) GetEventRecorderFor(name string) record.EventRecorder { - return p.eventBroadcaster.NewRecorder(p.scheme, corev1.EventSource{Component: name}) +// GetEventRecorderFor returns an event recorder that broadcasts to this provider's +// broadcaster. All events will be associated with a component of the given name. +func (p *Provider) GetEventRecorderFor(name string) record.EventRecorder { + return &lazyRecorder{ + prov: p, + name: name, + } +} + +// lazyRecorder is a recorder that doesn't actually instantiate any underlying +// recorder until the first event is emitted. +type lazyRecorder struct { + prov *Provider + name string + + recOnce sync.Once + rec record.EventRecorder +} + +// ensureRecording ensures that a concrete recorder is populated for this recorder. 
+func (l *lazyRecorder) ensureRecording() { + l.recOnce.Do(func() { + broadcaster := l.prov.getBroadcaster() + l.rec = broadcaster.NewRecorder(l.prov.scheme, corev1.EventSource{Component: l.name}) + }) +} + +func (l *lazyRecorder) Event(object runtime.Object, eventtype, reason, message string) { + l.ensureRecording() + + l.prov.lock.RLock() + if !l.prov.stopped { + l.rec.Event(object, eventtype, reason, message) + } + l.prov.lock.RUnlock() +} +func (l *lazyRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + l.ensureRecording() + + l.prov.lock.RLock() + if !l.prov.stopped { + l.rec.Eventf(object, eventtype, reason, messageFmt, args...) + } + l.prov.lock.RUnlock() +} +func (l *lazyRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + l.ensureRecording() + + l.prov.lock.RLock() + if !l.prov.stopped { + l.rec.AnnotatedEventf(object, annotations, eventtype, reason, messageFmt, args...) + } + l.prov.lock.RUnlock() } diff --git a/pkg/internal/recorder/recorder_integration_test.go b/pkg/internal/recorder/recorder_integration_test.go index f3134b0cb6..c278fbde79 100644 --- a/pkg/internal/recorder/recorder_integration_test.go +++ b/pkg/internal/recorder/recorder_integration_test.go @@ -17,6 +17,8 @@ limitations under the License. package recorder_test import ( + "context" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,24 +31,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) var _ = Describe("recorder", func() { - var stop chan struct{} - - BeforeEach(func() { - stop = make(chan struct{}) - Expect(cfg).NotTo(BeNil()) - }) - - AfterEach(func() { - close(stop) - }) - Describe("recorder", func() { - It("should publish events", func(done Done) { + It("should publish events", func(ctx SpecContext) { By("Creating the Manager") cm, err := manager.New(cfg, manager.Options{}) Expect(err).NotTo(HaveOccurred()) @@ -55,8 +46,8 @@ var _ = Describe("recorder", func() { recorder := cm.GetEventRecorderFor("test-recorder") instance, err := controller.New("foo-controller", cm, controller.Options{ Reconciler: reconcile.Func( - func(request reconcile.Request) (reconcile.Result, error) { - dp, err := clientset.AppsV1().Deployments(request.Namespace).Get(request.Name, metav1.GetOptions{}) + func(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + dp, err := clientset.AppsV1().Deployments(request.Namespace).Get(ctx, request.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) recorder.Event(dp, corev1.EventTypeNormal, "test-reason", "test-msg") return reconcile.Result{}, nil @@ -65,13 +56,13 @@ var _ = Describe("recorder", func() { Expect(err).NotTo(HaveOccurred()) By("Watching Resources") - err = instance.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForObject{}) + err = instance.Watch(source.Kind(cm.GetCache(), &appsv1.Deployment{}, &handler.TypedEnqueueRequestForObject[*appsv1.Deployment]{})) Expect(err).NotTo(HaveOccurred()) By("Starting the Manager") go func() { defer GinkgoRecover() - Expect(cm.Start(stop)).NotTo(HaveOccurred()) + Expect(cm.Start(ctx)).NotTo(HaveOccurred()) }() deployment := &appsv1.Deployment{ @@ -95,11 +86,11 @@ var _ = Describe("recorder", func() { } By("Invoking Reconciling") - deployment, err = clientset.AppsV1().Deployments("default").Create(deployment) + deployment, err = clientset.AppsV1().Deployments("default").Create(ctx, 
deployment, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("Validate event is published as expected") - evtWatcher, err := clientset.CoreV1().Events("default").Watch(metav1.ListOptions{}) + evtWatcher, err := clientset.CoreV1().Events("default").Watch(ctx, metav1.ListOptions{}) Expect(err).NotTo(HaveOccurred()) resultEvent := <-evtWatcher.ResultChan() @@ -115,8 +106,6 @@ var _ = Describe("recorder", func() { Expect(evt.Type).To(Equal(corev1.EventTypeNormal)) Expect(evt.Reason).To(Equal("test-reason")) Expect(evt.Message).To(Equal("test-msg")) - - close(done) }) }) }) diff --git a/pkg/internal/recorder/recorder_suite_test.go b/pkg/internal/recorder/recorder_suite_test.go index 0a28f3c604..e5b5836d58 100644 --- a/pkg/internal/recorder/recorder_suite_test.go +++ b/pkg/internal/recorder/recorder_suite_test.go @@ -17,9 +17,10 @@ limitations under the License. package recorder_test import ( + "net/http" "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -30,15 +31,16 @@ import ( func TestRecorder(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Recorder Integration Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Recorder Integration Suite") } var testenv *envtest.Environment var cfg *rest.Config +var httpClient *http.Client var clientset *kubernetes.Clientset -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) testenv = &envtest.Environment{} @@ -46,11 +48,12 @@ var _ = BeforeSuite(func(done Done) { cfg, err = testenv.Start() Expect(err).NotTo(HaveOccurred()) + httpClient, err = rest.HTTPClientFor(cfg) + Expect(err).ToNot(HaveOccurred()) + clientset, err = kubernetes.NewForConfig(cfg) Expect(err).NotTo(HaveOccurred()) - - close(done) -}, 60) +}) var _ = AfterSuite(func() { Expect(testenv.Stop()).To(Succeed()) diff --git a/pkg/internal/recorder/recorder_test.go b/pkg/internal/recorder/recorder_test.go index 35bc6f1726..e226e165a3 100644 --- a/pkg/internal/recorder/recorder_test.go +++ b/pkg/internal/recorder/recorder_test.go @@ -17,32 +17,35 @@ limitations under the License. package recorder_test import ( - tlog "github.com/go-logr/logr/testing" - . "github.com/onsi/ginkgo" + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/internal/recorder" ) var _ = Describe("recorder.Provider", func() { + makeBroadcaster := func() (record.EventBroadcaster, bool) { return record.NewBroadcaster(), true } Describe("NewProvider", func() { It("should return a provider instance and a nil error.", func() { - provider, err := recorder.NewProvider(cfg, scheme.Scheme, tlog.NullLogger{}) + provider, err := recorder.NewProvider(cfg, httpClient, scheme.Scheme, logr.Discard(), makeBroadcaster) Expect(provider).NotTo(BeNil()) Expect(err).NotTo(HaveOccurred()) }) - It("should return an error if failed to init clientSet.", func() { + It("should return an error if failed to init client.", func() { // Invalid the config cfg1 := *cfg - cfg1.ContentType = "invalid-type" - _, err := recorder.NewProvider(&cfg1, scheme.Scheme, tlog.NullLogger{}) - Expect(err.Error()).To(ContainSubstring("failed to init clientSet")) + cfg1.Host = "invalid host" + _, err := recorder.NewProvider(&cfg1, httpClient, scheme.Scheme, logr.Discard(), makeBroadcaster) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to init client")) }) }) Describe("GetEventRecorder", func() { It("should return a recorder instance.", func() { - provider, err := recorder.NewProvider(cfg, scheme.Scheme, tlog.NullLogger{}) + provider, err := recorder.NewProvider(cfg, httpClient, scheme.Scheme, logr.Discard(), makeBroadcaster) Expect(err).NotTo(HaveOccurred()) recorder := provider.GetEventRecorderFor("test") diff --git a/pkg/source/internal/eventsource.go b/pkg/internal/source/event_handler.go similarity index 52% rename from pkg/source/internal/eventsource.go rename to pkg/internal/source/event_handler.go index 8558e27365..7cc8c51555 100644 --- a/pkg/source/internal/eventsource.go +++ b/pkg/internal/source/event_handler.go @@ -17,78 +17,80 @@ limitations under the License. 
package internal import ( + "context" "fmt" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/predicate" ) var log = logf.RuntimeLog.WithName("source").WithName("EventHandler") -var _ cache.ResourceEventHandler = EventHandler{} - -// EventHandler adapts a eventhandler.EventHandler interface to a cache.ResourceEventHandler interface -type EventHandler struct { - EventHandler handler.EventHandler - Queue workqueue.RateLimitingInterface - Predicates []predicate.Predicate +var _ cache.ResourceEventHandler = &EventHandler[client.Object, any]{} + +// NewEventHandler creates a new EventHandler. +func NewEventHandler[object client.Object, request comparable]( + ctx context.Context, + queue workqueue.TypedRateLimitingInterface[request], + handler handler.TypedEventHandler[object, request], + predicates []predicate.TypedPredicate[object]) *EventHandler[object, request] { + return &EventHandler[object, request]{ + ctx: ctx, + handler: handler, + queue: queue, + predicates: predicates, + } } -// OnAdd creates CreateEvent and calls Create on EventHandler -func (e EventHandler) OnAdd(obj interface{}) { - c := event.CreateEvent{} +// EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface. +type EventHandler[object client.Object, request comparable] struct { + // ctx stores the context that created the event handler + // that is used to propagate cancellation signals to each handler function. 
+ ctx context.Context - // Pull metav1.Object out of the object - if o, err := meta.Accessor(obj); err == nil { - c.Meta = o - } else { - log.Error(err, "OnAdd missing Meta", - "object", obj, "type", fmt.Sprintf("%T", obj)) - return + handler handler.TypedEventHandler[object, request] + queue workqueue.TypedRateLimitingInterface[request] + predicates []predicate.TypedPredicate[object] +} + +// OnAdd creates CreateEvent and calls Create on EventHandler. +func (e *EventHandler[object, request]) OnAdd(obj interface{}, isInInitialList bool) { + c := event.TypedCreateEvent[object]{ + IsInInitialList: isInInitialList, } - // Pull the runtime.Object out of the object - if o, ok := obj.(runtime.Object); ok { + // Pull Object out of the object + if o, ok := obj.(object); ok { c.Object = o } else { - log.Error(nil, "OnAdd missing runtime.Object", + log.Error(nil, "OnAdd missing Object", "object", obj, "type", fmt.Sprintf("%T", obj)) return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Create(c) { return } } // Invoke create handler - e.EventHandler.Create(c, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Create(ctx, c, e.queue) } -// OnUpdate creates UpdateEvent and calls Update on EventHandler -func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { - u := event.UpdateEvent{} +// OnUpdate creates UpdateEvent and calls Update on EventHandler. 
+func (e *EventHandler[object, request]) OnUpdate(oldObj, newObj interface{}) { + u := event.TypedUpdateEvent[object]{} - // Pull metav1.Object out of the object - if o, err := meta.Accessor(oldObj); err == nil { - u.MetaOld = o - } else { - log.Error(err, "OnUpdate missing MetaOld", - "object", oldObj, "type", fmt.Sprintf("%T", oldObj)) - return - } - - // Pull the runtime.Object out of the object - if o, ok := oldObj.(runtime.Object); ok { + if o, ok := oldObj.(object); ok { u.ObjectOld = o } else { log.Error(nil, "OnUpdate missing ObjectOld", @@ -96,37 +98,30 @@ func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { return } - // Pull metav1.Object out of the object - if o, err := meta.Accessor(newObj); err == nil { - u.MetaNew = o - } else { - log.Error(err, "OnUpdate missing MetaNew", - "object", newObj, "type", fmt.Sprintf("%T", newObj)) - return - } - - // Pull the runtime.Object out of the object - if o, ok := newObj.(runtime.Object); ok { + // Pull Object out of the object + if o, ok := newObj.(object); ok { u.ObjectNew = o } else { log.Error(nil, "OnUpdate missing ObjectNew", - "object", oldObj, "type", fmt.Sprintf("%T", oldObj)) + "object", newObj, "type", fmt.Sprintf("%T", newObj)) return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Update(u) { return } } // Invoke update handler - e.EventHandler.Update(u, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Update(ctx, u, e.queue) } -// OnDelete creates DeleteEvent and calls Delete on EventHandler -func (e EventHandler) OnDelete(obj interface{}) { - d := event.DeleteEvent{} +// OnDelete creates DeleteEvent and calls Delete on EventHandler. +func (e *EventHandler[object, request]) OnDelete(obj interface{}) { + d := event.TypedDeleteEvent[object]{} // Deal with tombstone events by pulling the object out. Tombstone events wrap the object in a // DeleteFinalStateUnknown struct, so the object needs to be pulled out. 
@@ -134,7 +129,7 @@ func (e EventHandler) OnDelete(obj interface{}) { // This should never happen if we aren't missing events, which we have concluded that we are not // and made decisions off of this belief. Maybe this shouldn't be here? var ok bool - if _, ok = obj.(metav1.Object); !ok { + if _, ok = obj.(client.Object); !ok { // If the object doesn't have Metadata, assume it is a tombstone object of type DeletedFinalStateUnknown tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { @@ -144,34 +139,30 @@ func (e EventHandler) OnDelete(obj interface{}) { return } + // Set DeleteStateUnknown to true + d.DeleteStateUnknown = true + // Set obj to the tombstone obj obj = tombstone.Obj } - // Pull metav1.Object out of the object - if o, err := meta.Accessor(obj); err == nil { - d.Meta = o - } else { - log.Error(err, "OnDelete missing Meta", - "object", obj, "type", fmt.Sprintf("%T", obj)) - return - } - - // Pull the runtime.Object out of the object - if o, ok := obj.(runtime.Object); ok { + // Pull Object out of the object + if o, ok := obj.(object); ok { d.Object = o } else { - log.Error(nil, "OnDelete missing runtime.Object", + log.Error(nil, "OnDelete missing Object", "object", obj, "type", fmt.Sprintf("%T", obj)) return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Delete(d) { return } } // Invoke delete handler - e.EventHandler.Delete(d, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Delete(ctx, d, e.queue) } diff --git a/pkg/source/internal/internal_suite_test.go b/pkg/internal/source/internal_suite_test.go similarity index 78% rename from pkg/source/internal/internal_suite_test.go rename to pkg/internal/source/internal_suite_test.go index 53848f2db7..eeee8b22cd 100644 --- a/pkg/source/internal/internal_suite_test.go +++ b/pkg/internal/source/internal_suite_test.go @@ -19,18 +19,17 @@ package internal_test import ( "testing" - . "github.com/onsi/ginkgo" + . 
"github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) func TestInternal(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Internal Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Source Internal Suite") } var _ = BeforeSuite(func() { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) }) diff --git a/pkg/source/internal/internal_test.go b/pkg/internal/source/internal_test.go similarity index 57% rename from pkg/source/internal/internal_test.go rename to pkg/internal/source/internal_test.go index 183c0c7371..73eb1a1d28 100644 --- a/pkg/source/internal/internal_test.go +++ b/pkg/internal/source/internal_test.go @@ -17,14 +17,17 @@ limitations under the License. package internal_test import ( - . "github.com/onsi/ginkgo" + "context" + + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/source/internal" + internal "sigs.k8s.io/controller-runtime/pkg/internal/source" + "sigs.k8s.io/controller-runtime/pkg/reconcile" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -35,48 +38,44 @@ import ( ) var _ = Describe("Internal", func() { - - var instance internal.EventHandler + var instance *internal.EventHandler[client.Object, reconcile.Request] var funcs, setfuncs *handler.Funcs var set bool - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) { funcs = &handler.Funcs{ - CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { + CreateFunc: func(context.Context, event.CreateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Did not expect CreateEvent to be called.") }, - DeleteFunc: func(e event.DeleteEvent, q workqueue.RateLimitingInterface) { + DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Did not expect DeleteEvent to be called.") }, - UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Did not expect UpdateEvent to be called.") }, - GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + GenericFunc: func(context.Context, event.GenericEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Did not expect GenericEvent to be called.") }, } setfuncs = &handler.Funcs{ - CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { + CreateFunc: 
func(context.Context, event.CreateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { set = true }, - DeleteFunc: func(e event.DeleteEvent, q workqueue.RateLimitingInterface) { + DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { set = true }, - UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { set = true }, - GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + GenericFunc: func(context.Context, event.GenericEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { set = true }, } - instance = internal.EventHandler{ - Queue: controllertest.Queue{}, - EventHandler: funcs, - } + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, funcs, nil) }) Describe("EventHandler", func() { @@ -92,253 +91,207 @@ var _ = Describe("Internal", func() { newPod.Labels = map[string]string{"foo": "bar"} }) - It("should create a CreateEvent", func(done Done) { - funcs.CreateFunc = func(evt event.CreateEvent, q workqueue.RateLimitingInterface) { + It("should create a CreateEvent", func(ctx SpecContext) { + funcs.CreateFunc = func(ctx context.Context, evt event.CreateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() - Expect(q).To(Equal(instance.Queue)) - m, err := meta.Accessor(pod) - Expect(err).NotTo(HaveOccurred()) - Expect(evt.Meta).To(Equal(m)) Expect(evt.Object).To(Equal(pod)) } - instance.OnAdd(pod) - close(done) + instance.OnAdd(pod, false) }) - It("should used Predicates to filter CreateEvents", func(done Done) { - instance = internal.EventHandler{ - Queue: controllertest.Queue{}, - EventHandler: setfuncs, - } - - set = false - instance.Predicates = []predicate.Predicate{ + It("should used Predicates to filter CreateEvents", func(ctx SpecContext) { + instance = internal.NewEventHandler(ctx, 
&controllertest.Queue{}, setfuncs, []predicate.Predicate{ predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return false }}, - } - instance.OnAdd(pod) + }) + set = false + instance.OnAdd(pod, false) Expect(set).To(BeFalse()) set = false - instance.Predicates = []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, - } - instance.OnAdd(pod) + }) + instance.OnAdd(pod, false) Expect(set).To(BeTrue()) set = false - instance.Predicates = []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return false }}, - } - instance.OnAdd(pod) + }) + instance.OnAdd(pod, false) Expect(set).To(BeFalse()) set = false - instance.Predicates = []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return false }}, predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, - } - instance.OnAdd(pod) + }) + instance.OnAdd(pod, false) Expect(set).To(BeFalse()) set = false - instance.Predicates = []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, - } - instance.OnAdd(pod) + }) + instance.OnAdd(pod, false) Expect(set).To(BeTrue()) - - close(done) }) - It("should not call Create EventHandler if the object is not a runtime.Object", func(done Done) { - instance.OnAdd(&metav1.ObjectMeta{}) - close(done) + It("should not call Create EventHandler if the object is not a runtime.Object", 
func() { + instance.OnAdd(&metav1.ObjectMeta{}, false) }) - It("should not call Create EventHandler if the object does not have metadata", func(done Done) { - instance.OnAdd(FooRuntimeObject{}) - close(done) + It("should not call Create EventHandler if the object does not have metadata", func() { + instance.OnAdd(FooRuntimeObject{}, false) }) - It("should create an UpdateEvent", func(done Done) { - funcs.UpdateFunc = func(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + It("should create an UpdateEvent", func(ctx SpecContext) { + funcs.UpdateFunc = func(ctx context.Context, evt event.UpdateEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() - Expect(q).To(Equal(instance.Queue)) - - m, err := meta.Accessor(pod) - Expect(err).NotTo(HaveOccurred()) - Expect(evt.MetaOld).To(Equal(m)) Expect(evt.ObjectOld).To(Equal(pod)) - - m, err = meta.Accessor(newPod) - Expect(err).NotTo(HaveOccurred()) - Expect(evt.MetaNew).To(Equal(m)) Expect(evt.ObjectNew).To(Equal(newPod)) } instance.OnUpdate(pod, newPod) - close(done) }) - It("should used Predicates to filter UpdateEvents", func(done Done) { - instance = internal.EventHandler{ - Queue: controllertest.Queue{}, - EventHandler: setfuncs, - } - + It("should used Predicates to filter UpdateEvents", func(ctx SpecContext) { set = false - instance.Predicates = []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ predicate.Funcs{UpdateFunc: func(updateEvent event.UpdateEvent) bool { return false }}, - } + }) instance.OnUpdate(pod, newPod) Expect(set).To(BeFalse()) set = false - instance.Predicates = []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return true }}, - } + }) instance.OnUpdate(pod, newPod) Expect(set).To(BeTrue()) set = false - instance.Predicates = []predicate.Predicate{ 
+ instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return true }}, predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return false }}, - } + }) instance.OnUpdate(pod, newPod) Expect(set).To(BeFalse()) set = false - instance.Predicates = []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return false }}, predicate.Funcs{UpdateFunc: func(event.UpdateEvent) bool { return true }}, - } + }) instance.OnUpdate(pod, newPod) Expect(set).To(BeFalse()) set = false - instance.Predicates = []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, predicate.Funcs{CreateFunc: func(event.CreateEvent) bool { return true }}, - } + }) instance.OnUpdate(pod, newPod) Expect(set).To(BeTrue()) - - close(done) }) - It("should not call Update EventHandler if the object is not a runtime.Object", func(done Done) { + It("should not call Update EventHandler if the object is not a runtime.Object", func() { instance.OnUpdate(&metav1.ObjectMeta{}, &corev1.Pod{}) instance.OnUpdate(&corev1.Pod{}, &metav1.ObjectMeta{}) - close(done) }) - It("should not call Update EventHandler if the object does not have metadata", func(done Done) { + It("should not call Update EventHandler if the object does not have metadata", func() { instance.OnUpdate(FooRuntimeObject{}, &corev1.Pod{}) instance.OnUpdate(&corev1.Pod{}, FooRuntimeObject{}) - close(done) }) - It("should create a DeleteEvent", func(done Done) { - funcs.DeleteFunc = func(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + It("should create a DeleteEvent", func() { + funcs.DeleteFunc = func(ctx context.Context, evt event.DeleteEvent, q 
workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() - Expect(q).To(Equal(instance.Queue)) - - m, err := meta.Accessor(pod) - Expect(err).NotTo(HaveOccurred()) - Expect(evt.Meta).To(Equal(m)) Expect(evt.Object).To(Equal(pod)) } instance.OnDelete(pod) - close(done) }) - It("should used Predicates to filter DeleteEvents", func(done Done) { - instance = internal.EventHandler{ - Queue: controllertest.Queue{}, - EventHandler: setfuncs, - } - + It("should used Predicates to filter DeleteEvents", func(ctx SpecContext) { set = false - instance.Predicates = []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return false }}, - } + }) instance.OnDelete(pod) Expect(set).To(BeFalse()) set = false - instance.Predicates = []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, - } + }) instance.OnDelete(pod) Expect(set).To(BeTrue()) set = false - instance.Predicates = []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return false }}, - } + }) instance.OnDelete(pod) Expect(set).To(BeFalse()) set = false - instance.Predicates = []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, setfuncs, []predicate.Predicate{ predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return false }}, predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, - } + }) instance.OnDelete(pod) Expect(set).To(BeFalse()) set = false - instance.Predicates = []predicate.Predicate{ + instance = internal.NewEventHandler(ctx, &controllertest.Queue{}, 
setfuncs, []predicate.Predicate{ predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, predicate.Funcs{DeleteFunc: func(event.DeleteEvent) bool { return true }}, - } + }) instance.OnDelete(pod) Expect(set).To(BeTrue()) - - close(done) }) - It("should not call Delete EventHandler if the object is not a runtime.Object", func(done Done) { + It("should not call Delete EventHandler if the object is not a runtime.Object", func() { instance.OnDelete(&metav1.ObjectMeta{}) - close(done) }) - It("should not call Delete EventHandler if the object does not have metadata", func(done Done) { + It("should not call Delete EventHandler if the object does not have metadata", func() { instance.OnDelete(FooRuntimeObject{}) - close(done) }) - It("should create a DeleteEvent from a tombstone", func(done Done) { - + It("should create a DeleteEvent from a tombstone", func() { tombstone := cache.DeletedFinalStateUnknown{ Obj: pod, } - funcs.DeleteFunc = func(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + funcs.DeleteFunc = func(ctx context.Context, evt event.DeleteEvent, q workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() - Expect(q).To(Equal(instance.Queue)) - m, err := meta.Accessor(pod) - Expect(err).NotTo(HaveOccurred()) - Expect(evt.Meta).To(Equal(m)) Expect(evt.Object).To(Equal(pod)) + Expect(evt.DeleteStateUnknown).Should(BeTrue()) } instance.OnDelete(tombstone) - close(done) }) - It("should ignore tombstone objects without meta", func(done Done) { + It("should ignore tombstone objects without meta", func() { tombstone := cache.DeletedFinalStateUnknown{Obj: Foo{}} instance.OnDelete(tombstone) - close(done) }) - It("should ignore objects without meta", func(done Done) { - instance.OnAdd(Foo{}) + It("should ignore objects without meta", func() { + instance.OnAdd(Foo{}, false) instance.OnUpdate(Foo{}, Foo{}) instance.OnDelete(Foo{}) - close(done) + }) + }) + + Describe("Kind", func() { + It("should return kind source 
type", func() { + kind := internal.Kind[*corev1.Pod, reconcile.Request]{ + Type: &corev1.Pod{}, + } + Expect(kind.String()).Should(Equal("kind source: *v1.Pod")) }) }) }) diff --git a/pkg/internal/source/kind.go b/pkg/internal/source/kind.go new file mode 100644 index 0000000000..2854244523 --- /dev/null +++ b/pkg/internal/source/kind.go @@ -0,0 +1,143 @@ +package internal + +import ( + "context" + "errors" + "fmt" + "reflect" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + toolscache "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +var logKind = logf.RuntimeLog.WithName("source").WithName("Kind") + +// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). +type Kind[object client.Object, request comparable] struct { + // Type is the type of object to watch. e.g. &v1.Pod{} + Type object + + // Cache used to watch APIs + Cache cache.Cache + + Handler handler.TypedEventHandler[object, request] + + Predicates []predicate.TypedPredicate[object] + + // startedErr may contain an error if one was encountered during startup. If its closed and does not + // contain an error, startup and syncing finished. + startedErr chan error + startCancel func() +} + +// Start is internal and should be called only by the Controller to register an EventHandler with the Informer +// to enqueue reconcile.Requests. 
+func (ks *Kind[object, request]) Start(ctx context.Context, queue workqueue.TypedRateLimitingInterface[request]) error { + if isNil(ks.Type) { + return fmt.Errorf("must create Kind with a non-nil object") + } + if isNil(ks.Cache) { + return fmt.Errorf("must create Kind with a non-nil cache") + } + if isNil(ks.Handler) { + return errors.New("must create Kind with non-nil handler") + } + + // cache.GetInformer will block until its context is cancelled if the cache was already started and it can not + // sync that informer (most commonly due to RBAC issues). + ctx, ks.startCancel = context.WithCancel(ctx) + ks.startedErr = make(chan error, 1) // Buffer chan to not leak goroutines if WaitForSync isn't called + go func() { + var ( + i cache.Informer + lastErr error + ) + + // Tries to get an informer until it returns true, + // an error or the specified context is cancelled or expired. + if err := wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) { + // Lookup the Informer from the Cache and add an EventHandler which populates the Queue + i, lastErr = ks.Cache.GetInformer(ctx, ks.Type) + if lastErr != nil { + kindMatchErr := &meta.NoKindMatchError{} + switch { + case errors.As(lastErr, &kindMatchErr): + logKind.Error(lastErr, "if kind is a CRD, it should be installed before calling Start", + "kind", kindMatchErr.GroupKind) + case runtime.IsNotRegisteredError(lastErr): + logKind.Error(lastErr, "kind must be registered to the Scheme") + default: + logKind.Error(lastErr, "failed to get informer from cache") + } + return false, nil // Retry. 
+ } + return true, nil + }); err != nil { + if lastErr != nil { + ks.startedErr <- fmt.Errorf("failed to get informer from cache: %w", lastErr) + return + } + ks.startedErr <- err + return + } + + _, err := i.AddEventHandlerWithOptions(NewEventHandler(ctx, queue, ks.Handler, ks.Predicates), toolscache.HandlerOptions{ + Logger: &logKind, + }) + if err != nil { + ks.startedErr <- err + return + } + if !ks.Cache.WaitForCacheSync(ctx) { + // Would be great to return something more informative here + ks.startedErr <- errors.New("cache did not sync") + } + close(ks.startedErr) + }() + + return nil +} + +func (ks *Kind[object, request]) String() string { + if !isNil(ks.Type) { + return fmt.Sprintf("kind source: %T", ks.Type) + } + return "kind source: unknown type" +} + +// WaitForSync implements SyncingSource to allow controllers to wait with starting +// workers until the cache is synced. +func (ks *Kind[object, request]) WaitForSync(ctx context.Context) error { + select { + case err := <-ks.startedErr: + return err + case <-ctx.Done(): + ks.startCancel() + if errors.Is(ctx.Err(), context.Canceled) { + return nil + } + return fmt.Errorf("timed out waiting for cache to be synced for Kind %T", ks.Type) + } +} + +func isNil(arg any) bool { + if v := reflect.ValueOf(arg); !v.IsValid() || ((v.Kind() == reflect.Ptr || + v.Kind() == reflect.Interface || + v.Kind() == reflect.Slice || + v.Kind() == reflect.Map || + v.Kind() == reflect.Chan || + v.Kind() == reflect.Func) && v.IsNil()) { + return true + } + return false +} diff --git a/pkg/internal/syncs/syncs.go b/pkg/internal/syncs/syncs.go new file mode 100644 index 0000000000..c78a30377a --- /dev/null +++ b/pkg/internal/syncs/syncs.go @@ -0,0 +1,38 @@ +package syncs + +import ( + "context" + "reflect" + "sync" +) + +// MergeChans returns a channel that is closed when any of the input channels are signaled. +// The caller must call the returned CancelFunc to ensure no resources are leaked. 
+func MergeChans[T any](chans ...<-chan T) (<-chan T, context.CancelFunc) { + var once sync.Once + out := make(chan T) + cancel := make(chan T) + cancelFunc := func() { + once.Do(func() { + close(cancel) + }) + <-out + } + cases := make([]reflect.SelectCase, len(chans)+1) + for i := range chans { + cases[i] = reflect.SelectCase{ + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(chans[i]), + } + } + cases[len(cases)-1] = reflect.SelectCase{ + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(cancel), + } + go func() { + defer close(out) + _, _, _ = reflect.Select(cases) + }() + + return out, cancelFunc +} diff --git a/pkg/internal/syncs/syncs_test.go b/pkg/internal/syncs/syncs_test.go new file mode 100644 index 0000000000..7bf7d598a0 --- /dev/null +++ b/pkg/internal/syncs/syncs_test.go @@ -0,0 +1,107 @@ +package syncs + +import ( + "testing" + "time" + + // This appears to be needed so that the prow test runner won't fail. + _ "github.com/onsi/ginkgo/v2" + _ "github.com/onsi/gomega" +) + +func TestMergeChans(t *testing.T) { + tests := []struct { + name string + count int + signal int + }{ + { + name: "single channel, close 0", + count: 1, + signal: 0, + }, + { + name: "double channel, close 0", + count: 2, + signal: 0, + }, + { + name: "five channel, close 0", + count: 5, + signal: 0, + }, + { + name: "five channel, close 1", + count: 5, + signal: 1, + }, + { + name: "five channel, close 2", + count: 5, + signal: 2, + }, + { + name: "five channel, close 3", + count: 5, + signal: 3, + }, + { + name: "five channel, close 4", + count: 5, + signal: 4, + }, + { + name: "single channel, cancel", + count: 1, + signal: -1, + }, + { + name: "double channel, cancel", + count: 2, + signal: -1, + }, + { + name: "five channel, cancel", + count: 5, + signal: -1, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if callAndClose(test.count, test.signal, 1) { + t.Error("timeout before merged channel closed") + } + }) + } +} 
+ +func callAndClose(numChans, signalChan, timeoutSeconds int) bool { + chans := make([]chan struct{}, numChans) + readOnlyChans := make([]<-chan struct{}, numChans) + for i := range chans { + chans[i] = make(chan struct{}) + readOnlyChans[i] = chans[i] + } + defer func() { + for i := range chans { + close(chans[i]) + } + }() + + merged, cancel := MergeChans(readOnlyChans...) + defer cancel() + + timer := time.NewTimer(time.Duration(timeoutSeconds) * time.Second) + + if signalChan >= 0 { + chans[signalChan] <- struct{}{} + } else { + cancel() + } + select { + case <-merged: + return false + case <-timer.C: + return true + } +} diff --git a/pkg/internal/testing/addr/addr_suite_test.go b/pkg/internal/testing/addr/addr_suite_test.go new file mode 100644 index 0000000000..3869bb0207 --- /dev/null +++ b/pkg/internal/testing/addr/addr_suite_test.go @@ -0,0 +1,30 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package addr_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestAddr(t *testing.T) { + t.Parallel() + RegisterFailHandler(Fail) + RunSpecs(t, "Addr Suite") +} diff --git a/pkg/internal/testing/addr/manager.go b/pkg/internal/testing/addr/manager.go new file mode 100644 index 0000000000..ffa33a8861 --- /dev/null +++ b/pkg/internal/testing/addr/manager.go @@ -0,0 +1,142 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package addr + +import ( + "errors" + "fmt" + "io/fs" + "net" + "os" + "path/filepath" + "strings" + "time" + + "sigs.k8s.io/controller-runtime/pkg/internal/flock" +) + +// TODO(directxman12): interface / release functionality for external port managers + +const ( + portReserveTime = 2 * time.Minute + portConflictRetry = 100 + portFilePrefix = "port-" +) + +var ( + cacheDir string +) + +func init() { + baseDir, err := os.UserCacheDir() + if err == nil { + cacheDir = filepath.Join(baseDir, "kubebuilder-envtest") + err = os.MkdirAll(cacheDir, 0o750) + } + if err != nil { + // Either we didn't get a cache directory, or we can't use it + baseDir = os.TempDir() + cacheDir = filepath.Join(baseDir, "kubebuilder-envtest") + err = os.MkdirAll(cacheDir, 0o750) + } + if err != nil { + panic(err) + } +} + +type portCache struct{} + +func (c *portCache) add(port int) (bool, error) { + // Remove outdated ports. + if err := fs.WalkDir(os.DirFS(cacheDir), ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() || !d.Type().IsRegular() || !strings.HasPrefix(path, portFilePrefix) { + return nil + } + info, err := d.Info() + if err != nil { + // No-op if file no longer exists; may have been deleted by another + // process/thread trying to allocate ports. 
+ if errors.Is(err, fs.ErrNotExist) { + return nil + } + return err + } + if time.Since(info.ModTime()) > portReserveTime { + if err := os.Remove(filepath.Join(cacheDir, path)); err != nil { + // No-op if file no longer exists; may have been deleted by another + // process/thread trying to allocate ports. + if os.IsNotExist(err) { + return nil + } + return err + } + } + return nil + }); err != nil { + return false, err + } + // Try allocating new port, by acquiring a file. + path := fmt.Sprintf("%s/%s%d", cacheDir, portFilePrefix, port) + if err := flock.Acquire(path); errors.Is(err, flock.ErrAlreadyLocked) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + +var cache = &portCache{} + +func suggest(listenHost string) (*net.TCPListener, int, string, error) { + if listenHost == "" { + listenHost = "localhost" + } + addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(listenHost, "0")) + if err != nil { + return nil, -1, "", err + } + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return nil, -1, "", err + } + return l, l.Addr().(*net.TCPAddr).Port, + addr.IP.String(), + nil +} + +// Suggest suggests an address a process can listen on. It returns +// a tuple consisting of a free port and the hostname resolved to its IP. +// It makes sure that new port allocated does not conflict with old ports +// allocated within 1 minute. 
+func Suggest(listenHost string) (int, string, error) { + for i := 0; i < portConflictRetry; i++ { + listener, port, resolvedHost, err := suggest(listenHost) + if err != nil { + return -1, "", err + } + defer listener.Close() + if ok, err := cache.add(port); ok { + return port, resolvedHost, nil + } else if err != nil { + return -1, "", err + } + } + return -1, "", fmt.Errorf("no free ports found after %d retries", portConflictRetry) +} diff --git a/pkg/internal/testing/addr/manager_test.go b/pkg/internal/testing/addr/manager_test.go new file mode 100644 index 0000000000..065e847dc5 --- /dev/null +++ b/pkg/internal/testing/addr/manager_test.go @@ -0,0 +1,77 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package addr_test + +import ( + "net" + "strconv" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" +) + +var _ = Describe("SuggestAddress", func() { + It("returns a free port and an address to bind to", func() { + port, host, err := addr.Suggest("") + + Expect(err).NotTo(HaveOccurred()) + Expect(host).To(Or(Equal("127.0.0.1"), Equal("::1"))) + Expect(port).NotTo(Equal(0)) + + addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(host, strconv.Itoa(port))) + Expect(err).NotTo(HaveOccurred()) + l, err := net.ListenTCP("tcp", addr) + defer func() { + Expect(l.Close()).To(Succeed()) + }() + Expect(err).NotTo(HaveOccurred()) + }) + + It("supports an explicit listenHost", func() { + port, host, err := addr.Suggest("localhost") + + Expect(err).NotTo(HaveOccurred()) + Expect(host).To(Or(Equal("127.0.0.1"), Equal("::1"))) + Expect(port).NotTo(Equal(0)) + + addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(host, strconv.Itoa(port))) + Expect(err).NotTo(HaveOccurred()) + l, err := net.ListenTCP("tcp", addr) + defer func() { + Expect(l.Close()).To(Succeed()) + }() + Expect(err).NotTo(HaveOccurred()) + }) + + It("supports a 0.0.0.0 listenHost", func() { + port, host, err := addr.Suggest("0.0.0.0") + + Expect(err).NotTo(HaveOccurred()) + Expect(host).To(Equal("0.0.0.0")) + Expect(port).NotTo(Equal(0)) + + addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(host, strconv.Itoa(port))) + Expect(err).NotTo(HaveOccurred()) + l, err := net.ListenTCP("tcp", addr) + defer func() { + Expect(l.Close()).To(Succeed()) + }() + Expect(err).NotTo(HaveOccurred()) + }) +}) diff --git a/examples/conversion/pkg/apis/addtoscheme_jobs_v2.go b/pkg/internal/testing/certs/certs_suite_test.go similarity index 67% rename from examples/conversion/pkg/apis/addtoscheme_jobs_v2.go rename to pkg/internal/testing/certs/certs_suite_test.go index 769e79230a..3b3008c294 100644 --- a/examples/conversion/pkg/apis/addtoscheme_jobs_v2.go +++ b/pkg/internal/testing/certs/certs_suite_test.go @@ -1,4 +1,5 
@@ /* +Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,13 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ -package apis +package certs_test import ( - "sigs.k8s.io/controller-runtime/examples/conversion/pkg/apis/jobs/v2" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" ) -func init() { - // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back - AddToSchemes = append(AddToSchemes, v2.SchemeBuilder.AddToScheme) +func TestInternal(t *testing.T) { + t.Parallel() + RegisterFailHandler(Fail) + RunSpecs(t, "TinyCA (Internal Certs) Suite") } diff --git a/pkg/internal/testing/certs/tinyca.go b/pkg/internal/testing/certs/tinyca.go new file mode 100644 index 0000000000..b4188237e6 --- /dev/null +++ b/pkg/internal/testing/certs/tinyca.go @@ -0,0 +1,224 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certs + +// NB(directxman12): nothing has verified that this has good settings. In fact, +// the setting generated here are probably terrible, but they're fine for integration +// tests. These ABSOLUTELY SHOULD NOT ever be exposed in the public API. They're +// ONLY for use with envtest's ability to configure webhook testing. 
+// If I didn't otherwise not want to add a dependency on cfssl, I'd just use that. + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + crand "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "net" + "time" + + certutil "k8s.io/client-go/util/cert" +) + +var ( + ellipticCurve = elliptic.P256() + bigOne = big.NewInt(1) +) + +// CertPair is a private key and certificate for use for client auth, as a CA, or serving. +type CertPair struct { + Key crypto.Signer + Cert *x509.Certificate +} + +// CertBytes returns the PEM-encoded version of the certificate for this pair. +func (k CertPair) CertBytes() []byte { + return pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: k.Cert.Raw, + }) +} + +// AsBytes encodes keypair in the appropriate formats for on-disk storage (PEM and +// PKCS8, respectively). +func (k CertPair) AsBytes() (cert []byte, key []byte, err error) { + cert = k.CertBytes() + + rawKeyData, err := x509.MarshalPKCS8PrivateKey(k.Key) + if err != nil { + return nil, nil, fmt.Errorf("unable to encode private key: %w", err) + } + + key = pem.EncodeToMemory(&pem.Block{ + Type: "PRIVATE KEY", + Bytes: rawKeyData, + }) + + return cert, key, nil +} + +// TinyCA supports signing serving certs and client-certs, +// and can be used as an auth mechanism with envtest. +type TinyCA struct { + CA CertPair + orgName string + + nextSerial *big.Int +} + +// newPrivateKey generates a new private key of a relatively sane size (see +// rsaKeySize). +func newPrivateKey() (crypto.Signer, error) { + return ecdsa.GenerateKey(ellipticCurve, crand.Reader) +} + +// NewTinyCA creates a new a tiny CA utility for provisioning serving certs and client certs FOR TESTING ONLY. +// Don't use this for anything else! 
+func NewTinyCA() (*TinyCA, error) { + caPrivateKey, err := newPrivateKey() + if err != nil { + return nil, fmt.Errorf("unable to generate private key for CA: %w", err) + } + caCfg := certutil.Config{CommonName: "envtest-environment", Organization: []string{"envtest"}} + caCert, err := certutil.NewSelfSignedCACert(caCfg, caPrivateKey) + if err != nil { + return nil, fmt.Errorf("unable to generate certificate for CA: %w", err) + } + + return &TinyCA{ + CA: CertPair{Key: caPrivateKey, Cert: caCert}, + orgName: "envtest", + nextSerial: big.NewInt(1), + }, nil +} + +func (c *TinyCA) makeCert(cfg certutil.Config) (CertPair, error) { + now := time.Now() + + key, err := newPrivateKey() + if err != nil { + return CertPair{}, fmt.Errorf("unable to create private key: %w", err) + } + + serial := new(big.Int).Set(c.nextSerial) + c.nextSerial.Add(c.nextSerial, bigOne) + + template := x509.Certificate{ + Subject: pkix.Name{CommonName: cfg.CommonName, Organization: cfg.Organization}, + DNSNames: cfg.AltNames.DNSNames, + IPAddresses: cfg.AltNames.IPs, + SerialNumber: serial, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: cfg.Usages, + + // technically not necessary for testing, but let's set anyway just in case. + NotBefore: now.UTC(), + // 1 week -- the default for cfssl, and just long enough for a + // long-term test, but not too long that anyone would try to use this + // seriously. 
+ NotAfter: now.Add(168 * time.Hour).UTC(), + } + + certRaw, err := x509.CreateCertificate(crand.Reader, &template, c.CA.Cert, key.Public(), c.CA.Key) + if err != nil { + return CertPair{}, fmt.Errorf("unable to create certificate: %w", err) + } + + cert, err := x509.ParseCertificate(certRaw) + if err != nil { + return CertPair{}, fmt.Errorf("generated invalid certificate, could not parse: %w", err) + } + + return CertPair{ + Key: key, + Cert: cert, + }, nil +} + +// NewServingCert returns a new CertPair for a serving HTTPS on localhost (or other specified names). +func (c *TinyCA) NewServingCert(names ...string) (CertPair, error) { + if len(names) == 0 { + names = []string{"localhost"} + } + dnsNames, ips, err := resolveNames(names) + if err != nil { + return CertPair{}, err + } + + return c.makeCert(certutil.Config{ + CommonName: "localhost", + Organization: []string{c.orgName}, + AltNames: certutil.AltNames{ + DNSNames: dnsNames, + IPs: ips, + }, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }) +} + +// ClientInfo describes some Kubernetes user for the purposes of creating +// client certificates. +type ClientInfo struct { + // Name is the user name (embedded as the cert's CommonName) + Name string + // Groups are the groups to which this user belongs (embedded as the cert's + // Organization) + Groups []string +} + +// NewClientCert produces a new CertPair suitable for use with Kubernetes +// client cert auth with an API server validating based on this CA. 
+func (c *TinyCA) NewClientCert(user ClientInfo) (CertPair, error) { + return c.makeCert(certutil.Config{ + CommonName: user.Name, + Organization: user.Groups, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }) +} + +func resolveNames(names []string) ([]string, []net.IP, error) { + dnsNames := []string{} + ips := []net.IP{} + for _, name := range names { + if name == "" { + continue + } + ip := net.ParseIP(name) + if ip == nil { + dnsNames = append(dnsNames, name) + // Also resolve to IPs. + nameIPs, err := net.LookupHost(name) + if err != nil { + return nil, nil, err + } + for _, nameIP := range nameIPs { + ip = net.ParseIP(nameIP) + if ip != nil { + ips = append(ips, ip) + } + } + } else { + ips = append(ips, ip) + } + } + return dnsNames, ips, nil +} diff --git a/pkg/internal/testing/certs/tinyca_test.go b/pkg/internal/testing/certs/tinyca_test.go new file mode 100644 index 0000000000..5d84de56fb --- /dev/null +++ b/pkg/internal/testing/certs/tinyca_test.go @@ -0,0 +1,254 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certs_test + +import ( + "crypto/x509" + "encoding/pem" + "math/big" + "net" + "sort" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gstruct" + + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" +) + +var _ = Describe("TinyCA", func() { + var ca *certs.TinyCA + + BeforeEach(func() { + var err error + ca, err = certs.NewTinyCA() + Expect(err).NotTo(HaveOccurred(), "should be able to initialize the CA") + }) + + Describe("the CA certs themselves", func() { + It("should be retrievable as a cert pair", func() { + Expect(ca.CA.Key).NotTo(BeNil(), "should have a key") + Expect(ca.CA.Cert).NotTo(BeNil(), "should have a cert") + }) + + It("should be usable for signing & verifying", func() { + Expect(ca.CA.Cert.KeyUsage&x509.KeyUsageCertSign).NotTo(BeEquivalentTo(0), "should be usable for cert signing") + Expect(ca.CA.Cert.KeyUsage&x509.KeyUsageDigitalSignature).NotTo(BeEquivalentTo(0), "should be usable for signature verifying") + }) + }) + + It("should produce unique serials among all generated certificates of all types", func() { + By("generating a few cert pairs for both serving and client auth") + firstCerts, err := ca.NewServingCert() + Expect(err).NotTo(HaveOccurred()) + secondCerts, err := ca.NewClientCert(certs.ClientInfo{Name: "user"}) + Expect(err).NotTo(HaveOccurred()) + thirdCerts, err := ca.NewServingCert() + Expect(err).NotTo(HaveOccurred()) + + By("checking that they have different serials") + serials := []*big.Int{ + firstCerts.Cert.SerialNumber, + secondCerts.Cert.SerialNumber, + thirdCerts.Cert.SerialNumber, + } + // quick uniqueness check of numbers: sort, then you only have to compare sequential entries + sort.Slice(serials, func(i, j int) bool { + return serials[i].Cmp(serials[j]) == -1 + }) + Expect(serials[1].Cmp(serials[0])).NotTo(Equal(0), "serials shouldn't be equal") + Expect(serials[2].Cmp(serials[1])).NotTo(Equal(0), "serials shouldn't be equal") + }) + + Describe("Generated serving certs", func() { + It("should be valid for short enough to avoid production usage, but long enough for long-running tests", func() { + cert, err 
:= ca.NewServingCert() + Expect(err).NotTo(HaveOccurred(), "should be able to generate the serving certs") + + duration := time.Until(cert.Cert.NotAfter) + Expect(duration).To(BeNumerically("<=", 168*time.Hour), "not-after should be short-ish (<= 1 week)") + Expect(duration).To(BeNumerically(">=", 2*time.Hour), "not-after should be enough for long tests (couple of hours)") + }) + + Context("when encoding names", func() { + var cert certs.CertPair + BeforeEach(func() { + By("generating a serving cert with IPv4 & IPv6 addresses, and DNS names") + var err error + // IPs are in the "example & docs" blocks for IPv4 (TEST-NET-1) & IPv6 + cert, err = ca.NewServingCert("192.0.2.1", "localhost", "2001:db8::") + Expect(err).NotTo(HaveOccurred(), "should be able to create the serving certs") + }) + + It("should encode all non-IP names as DNS SANs", func() { + Expect(cert.Cert.DNSNames).To(ConsistOf("localhost")) + }) + + It("should encode all IP names as IP SANs", func() { + // NB(directxman12): this is non-exhaustive because we also + // convert DNS SANs to IPs too (see test below) + Expect(cert.Cert.IPAddresses).To(ContainElements( + // normalize the elements with To16 so we can compare them to the output of + // of ParseIP safely (the alternative is a custom matcher that calls Equal, + // but this is easier) + WithTransform(net.IP.To16, Equal(net.ParseIP("192.0.2.1"))), + WithTransform(net.IP.To16, Equal(net.ParseIP("2001:db8::"))), + )) + }) + + It("should add the corresponding IP address(es) (as IP SANs) for DNS names", func() { + // NB(directxman12): we currently fail if the lookup fails. + // I'm not certain this is the best idea (both the bailing on + // error and the actual idea), so if this causes issues, you + // might want to reconsider. 
+ + localhostAddrs, err := net.LookupHost("localhost") + Expect(err).NotTo(HaveOccurred(), "should be able to find IPs for localhost") + localhostIPs := make([]interface{}, len(localhostAddrs)) + for i, addr := range localhostAddrs { + // normalize the elements with To16 so we can compare them to the output of + // of ParseIP safely (the alternative is a custom matcher that calls Equal, + // but this is easier) + localhostIPs[i] = WithTransform(net.IP.To16, Equal(net.ParseIP(addr))) + } + Expect(cert.Cert.IPAddresses).To(ContainElements(localhostIPs...)) + }) + }) + + It("should assume a name of localhost (DNS SAN) if no names are given", func() { + cert, err := ca.NewServingCert() + Expect(err).NotTo(HaveOccurred(), "should be able to generate a serving cert with the default name") + Expect(cert.Cert.DNSNames).To(ConsistOf("localhost"), "the default DNS name should be localhost") + + }) + + It("should be usable for server auth, verifying, and enciphering", func() { + cert, err := ca.NewServingCert() + Expect(err).NotTo(HaveOccurred(), "should be able to generate a serving cert") + + Expect(cert.Cert.KeyUsage&x509.KeyUsageKeyEncipherment).NotTo(BeEquivalentTo(0), "should be usable for key enciphering") + Expect(cert.Cert.KeyUsage&x509.KeyUsageDigitalSignature).NotTo(BeEquivalentTo(0), "should be usable for signature verifying") + Expect(cert.Cert.ExtKeyUsage).To(ContainElement(x509.ExtKeyUsageServerAuth), "should be usable for server auth") + + }) + + It("should be signed by the CA", func() { + cert, err := ca.NewServingCert() + Expect(err).NotTo(HaveOccurred(), "should be able to generate a serving cert") + Expect(cert.Cert.CheckSignatureFrom(ca.CA.Cert)).To(Succeed()) + }) + }) + + Describe("Generated client certs", func() { + var cert certs.CertPair + BeforeEach(func() { + var err error + cert, err = ca.NewClientCert(certs.ClientInfo{ + Name: "user", + Groups: []string{"group1", "group2"}, + }) + Expect(err).NotTo(HaveOccurred(), "should be able to create a 
client cert") + }) + + It("should be valid for short enough to avoid production usage, but long enough for long-running tests", func() { + duration := time.Until(cert.Cert.NotAfter) + Expect(duration).To(BeNumerically("<=", 168*time.Hour), "not-after should be short-ish (<= 1 week)") + Expect(duration).To(BeNumerically(">=", 2*time.Hour), "not-after should be enough for long tests (couple of hours)") + }) + + It("should be usable for client auth, verifying, and enciphering", func() { + Expect(cert.Cert.KeyUsage&x509.KeyUsageKeyEncipherment).NotTo(BeEquivalentTo(0), "should be usable for key enciphering") + Expect(cert.Cert.KeyUsage&x509.KeyUsageDigitalSignature).NotTo(BeEquivalentTo(0), "should be usable for signature verifying") + Expect(cert.Cert.ExtKeyUsage).To(ContainElement(x509.ExtKeyUsageClientAuth), "should be usable for client auth") + }) + + It("should encode the user name as the common name", func() { + Expect(cert.Cert.Subject.CommonName).To(Equal("user")) + }) + + It("should encode the groups as the organization values", func() { + Expect(cert.Cert.Subject.Organization).To(ConsistOf("group1", "group2")) + }) + + It("should be signed by the CA", func() { + Expect(cert.Cert.CheckSignatureFrom(ca.CA.Cert)).To(Succeed()) + }) + }) +}) + +var _ = Describe("Certificate Pairs", func() { + var pair certs.CertPair + BeforeEach(func() { + ca, err := certs.NewTinyCA() + Expect(err).NotTo(HaveOccurred(), "should be able to generate a cert pair") + + pair = ca.CA + }) + + Context("when serializing just the public key", func() { + It("should serialize into a CERTIFICATE PEM block", func() { + bytes := pair.CertBytes() + Expect(bytes).NotTo(BeEmpty(), "should produce some cert bytes") + + block, rest := pem.Decode(bytes) + Expect(rest).To(BeEmpty(), "shouldn't have any data besides the PEM block") + + Expect(block).To(PointTo(MatchAllFields(Fields{ + "Type": Equal("CERTIFICATE"), + "Headers": BeEmpty(), + "Bytes": Equal(pair.Cert.Raw), + }))) + }) + }) + + 
Context("when serializing both parts", func() { + var certBytes, keyBytes []byte + BeforeEach(func() { + var err error + certBytes, keyBytes, err = pair.AsBytes() + Expect(err).NotTo(HaveOccurred(), "should be able to serialize the pair") + }) + + It("should serialize the private key in PKCS8 form in a PRIVATE KEY PEM block", func() { + Expect(keyBytes).NotTo(BeEmpty(), "should produce some key bytes") + + By("decoding & checking the PEM block") + block, rest := pem.Decode(keyBytes) + Expect(rest).To(BeEmpty(), "shouldn't have any data besides the PEM block") + + Expect(block.Type).To(Equal("PRIVATE KEY")) + + By("decoding & checking the PKCS8 data") + Expect(x509.ParsePKCS8PrivateKey(block.Bytes)).NotTo(BeNil(), "should be able to parse back the private key") + }) + + It("should serialize the public key into a CERTIFICATE PEM block", func() { + Expect(certBytes).NotTo(BeEmpty(), "should produce some cert bytes") + + block, rest := pem.Decode(certBytes) + Expect(rest).To(BeEmpty(), "shouldn't have any data besides the PEM block") + + Expect(block).To(PointTo(MatchAllFields(Fields{ + "Type": Equal("CERTIFICATE"), + "Headers": BeEmpty(), + "Bytes": Equal(pair.Cert.Raw), + }))) + }) + + }) +}) diff --git a/pkg/internal/testing/controlplane/apiserver.go b/pkg/internal/testing/controlplane/apiserver.go new file mode 100644 index 0000000000..aadb69e84f --- /dev/null +++ b/pkg/internal/testing/controlplane/apiserver.go @@ -0,0 +1,476 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strconv" + "time" + + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +const ( + // saKeyFile is the name of the service account signing private key file. + saKeyFile = "sa-signer.key" + // saKeyFile is the name of the service account signing public key (cert) file. + saCertFile = "sa-signer.crt" +) + +// SecureServing provides/configures how the API server serves on the secure port. +type SecureServing struct { + // ListenAddr contains the host & port to serve on. + // + // Configurable. If unset, it will be defaulted. + process.ListenAddr + // CA contains the CA that signed the API server's serving certificates. + // + // Read-only. + CA []byte + // Authn can be used to provision users, and override what type of + // authentication is used to provision users. + // + // Configurable. If unset, it will be defaulted. + Authn +} + +// APIServer knows how to run a kubernetes apiserver. +type APIServer struct { + // URL is the address the ApiServer should listen on for client + // connections. + // + // If set, this will configure the *insecure* serving details. + // If unset, it will contain the insecure port if insecure serving is enabled, + // and otherwise will contain the secure port. + // + // If this is not specified, we default to a random free port on localhost. + // + // Deprecated: use InsecureServing (for the insecure URL) or SecureServing, ideally. + URL *url.URL + + // SecurePort is the additional secure port that the APIServer should listen on. + // + // If set, this will override SecureServing.Port. + // + // Deprecated: use SecureServing. 
+ SecurePort int + + // SecureServing indicates how the API server will serve on the secure port. + // + // Some parts are configurable. Will be defaulted if unset. + SecureServing + + // InsecureServing indicates how the API server will serve on the insecure port. + // + // If unset, the insecure port will be disabled. Set to an empty struct to get + // default values. + // + // Deprecated: does not work with Kubernetes versions 1.20 and above. Use secure + // serving instead. + InsecureServing *process.ListenAddr + + // Path is the path to the apiserver binary. + // + // If this is left as the empty string, we will attempt to locate a binary, + // by checking for the TEST_ASSET_KUBE_APISERVER environment variable, and + // the default test assets directory. See the "Binaries" section above (in + // doc.go) for details. + Path string + + // Args is a list of arguments which will passed to the APIServer binary. + // Before they are passed on, they will be evaluated as go-template strings. + // This means you can use fields which are defined and exported on this + // APIServer struct (e.g. "--cert-dir={{ .Dir }}"). + // Those templates will be evaluated after the defaulting of the APIServer's + // fields has already happened and just before the binary actually gets + // started. Thus you have access to calculated fields like `URL` and others. + // + // If not specified, the minimal set of arguments to run the APIServer will + // be used. + // + // They will be loaded into the same argument set as Configure. Each flag + // will be Append-ed to the configured arguments just before launch. + // + // Deprecated: use Configure instead. + Args []string + + // CertDir is a path to a directory containing whatever certificates the + // APIServer will need. + // + // If left unspecified, then the Start() method will create a fresh temporary + // directory, and the Stop() method will clean it up. + CertDir string + + // EtcdURL is the URL of the Etcd the APIServer should use. 
+ // + // If this is not specified, the Start() method will return an error. + EtcdURL *url.URL + + // StartTimeout, StopTimeout specify the time the APIServer is allowed to + // take when starting and stoppping before an error is emitted. + // + // If not specified, these default to 20 seconds. + StartTimeout time.Duration + StopTimeout time.Duration + + // Out, Err specify where APIServer should write its StdOut, StdErr to. + // + // If not specified, the output will be discarded. + Out io.Writer + Err io.Writer + + processState *process.State + + // args contains the structured arguments to use for running the API server + // Lazily initialized by .Configure(), Defaulted eventually with .defaultArgs() + args *process.Arguments +} + +// Configure returns Arguments that may be used to customize the +// flags used to launch the API server. A set of defaults will +// be applied underneath. +func (s *APIServer) Configure() *process.Arguments { + if s.args == nil { + s.args = process.EmptyArguments() + } + return s.args +} + +// Start starts the apiserver, waits for it to come up, and returns an error, +// if occurred. +func (s *APIServer) Start() error { + if err := s.prepare(); err != nil { + return err + } + return s.processState.Start(s.Out, s.Err) +} + +func (s *APIServer) prepare() error { + if err := s.setProcessState(); err != nil { + return err + } + return s.Authn.Start() +} + +// configurePorts configures the serving ports for this API server. +// +// Most of this method currently deals with making the deprecated fields +// take precedence over the new fields. +func (s *APIServer) configurePorts() error { + // prefer the old fields to the new fields if a user set one, + // otherwise, default the new fields and populate the old ones. 
+ + // Insecure: URL, InsecureServing + if s.URL != nil { + s.InsecureServing = &process.ListenAddr{ + Address: s.URL.Hostname(), + Port: s.URL.Port(), + } + } else if insec := s.InsecureServing; insec != nil { + if insec.Port == "" || insec.Address == "" { + port, host, err := addr.Suggest("") + if err != nil { + return fmt.Errorf("unable to provision unused insecure port: %w", err) + } + s.InsecureServing.Port = strconv.Itoa(port) + s.InsecureServing.Address = host + } + s.URL = s.InsecureServing.URL("http", "") + } + + // Secure: SecurePort, SecureServing + if s.SecurePort != 0 { + s.SecureServing.Port = strconv.Itoa(s.SecurePort) + // if we don't have an address, try the insecure address, and otherwise + // default to loopback. + if s.SecureServing.Address == "" { + if s.InsecureServing != nil { + s.SecureServing.Address = s.InsecureServing.Address + } else { + s.SecureServing.Address = "127.0.0.1" + } + } + } else if s.SecureServing.Port == "" || s.SecureServing.Address == "" { + port, host, err := addr.Suggest("") + if err != nil { + return fmt.Errorf("unable to provision unused secure port: %w", err) + } + s.SecureServing.Port = strconv.Itoa(port) + s.SecureServing.Address = host + s.SecurePort = port + } + + return nil +} + +func (s *APIServer) setProcessState() error { + if s.EtcdURL == nil { + return fmt.Errorf("expected EtcdURL to be configured") + } + + var err error + + // unconditionally re-set this so we can successfully restart + // TODO(directxman12): we supported this in the past, but do we actually + // want to support re-using an API server object to restart? The loss + // of provisioned users is surprising to say the least. 
+ s.processState = &process.State{ + Dir: s.CertDir, + Path: s.Path, + StartTimeout: s.StartTimeout, + StopTimeout: s.StopTimeout, + } + if err := s.processState.Init("kube-apiserver"); err != nil { + return err + } + + if err := s.configurePorts(); err != nil { + return err + } + + // the secure port will always be on, so use that + s.processState.HealthCheck.URL = *s.SecureServing.URL("https", "/healthz") + + s.CertDir = s.processState.Dir + s.Path = s.processState.Path + s.StartTimeout = s.processState.StartTimeout + s.StopTimeout = s.processState.StopTimeout + + if err := s.populateAPIServerCerts(); err != nil { + return err + } + + if s.SecureServing.Authn == nil { + authn, err := NewCertAuthn() + if err != nil { + return err + } + s.SecureServing.Authn = authn + } + + if err := s.Authn.Configure(s.CertDir, s.Configure()); err != nil { + return err + } + + // NB(directxman12): insecure port is a mess: + // - 1.19 and below have the `--insecure-port` flag, and require it to be set to zero to + // disable it, otherwise the default will be used and we'll conflict. + // - 1.20 requires the flag to be unset or set to zero, and yells at you if you configure it + // - 1.24 won't have the flag at all... + // + // In an effort to automatically do the right thing during this mess, we do feature discovery + // on the flags, and hope that we've "parsed" them properly. + // + // TODO(directxman12): once we support 1.20 as the min version (might be when 1.24 comes out, + // might be around 1.25 or 1.26), remove this logic and the corresponding line in API server's + // default args. 
+ if err := s.discoverFlags(); err != nil { + return err + } + + s.processState.Args, s.Args, err = process.TemplateAndArguments(s.Args, s.Configure(), process.TemplateDefaults{ //nolint:staticcheck + Data: s, + Defaults: s.defaultArgs(), + MinimalDefaults: map[string][]string{ + // as per kubernetes-sigs/controller-runtime#641, we need this (we + // probably need other stuff too, but this is the only thing that was + // previously considered a "minimal default") + "service-cluster-ip-range": {"10.0.0.0/24"}, + + // we need *some* authorization mode for health checks on the secure port, + // so default to RBAC unless the user set something else (in which case + // this'll be ignored due to SliceToArguments using AppendNoDefaults). + "authorization-mode": {"RBAC"}, + }, + }) + if err != nil { + return err + } + + return nil +} + +// discoverFlags checks for certain flags that *must* be set in certain +// versions, and *must not* be set in others. +func (s *APIServer) discoverFlags() error { + // Present: <1.24, Absent: >= 1.24 + present, err := s.processState.CheckFlag("insecure-port") + if err != nil { + return err + } + + if !present { + s.Configure().Disable("insecure-port") + } + + return nil +} + +func (s *APIServer) defaultArgs() map[string][]string { + args := map[string][]string{ + "service-cluster-ip-range": {"10.0.0.0/24"}, + "allow-privileged": {"true"}, + // we're keeping this disabled because if enabled, default SA is + // missing which would force all tests to create one in normal + // apiserver operation this SA is created by controller, but that is + // not run in integration environment + "disable-admission-plugins": {"ServiceAccount"}, + "cert-dir": {s.CertDir}, + "authorization-mode": {"RBAC"}, + "secure-port": {s.SecureServing.Port}, + // NB(directxman12): previously we didn't set the bind address for the secure + // port. 
It *shouldn't* make a difference unless people are doing something really + // funky, but if you start to get bug reports look here ;-) + "bind-address": {s.SecureServing.Address}, + + // required on 1.20+, fine to leave on for <1.20 + "service-account-issuer": {s.SecureServing.URL("https", "/").String()}, + "service-account-key-file": {filepath.Join(s.CertDir, saCertFile)}, + "service-account-signing-key-file": {filepath.Join(s.CertDir, saKeyFile)}, + } + if s.EtcdURL != nil { + args["etcd-servers"] = []string{s.EtcdURL.String()} + } + if s.URL != nil { + args["insecure-port"] = []string{s.URL.Port()} + args["insecure-bind-address"] = []string{s.URL.Hostname()} + } else { + // TODO(directxman12): remove this once 1.21 is the lowest version we support + // (this might be a while, but this line'll break as of 1.24, so see the comment + // in Start + args["insecure-port"] = []string{"0"} + } + return args +} + +func (s *APIServer) populateAPIServerCerts() error { + _, statErr := os.Stat(filepath.Join(s.CertDir, "apiserver.crt")) + if !os.IsNotExist(statErr) { + return statErr + } + + ca, err := certs.NewTinyCA() + if err != nil { + return err + } + + servingAddresses := []string{"localhost"} + if s.SecureServing.ListenAddr.Address != "" { + servingAddresses = append(servingAddresses, s.SecureServing.ListenAddr.Address) + } + + servingCerts, err := ca.NewServingCert(servingAddresses...) 
+ if err != nil { + return err + } + + certData, keyData, err := servingCerts.AsBytes() + if err != nil { + return err + } + + if err := os.WriteFile(filepath.Join(s.CertDir, "apiserver.crt"), certData, 0o640); err != nil { + return err + } + if err := os.WriteFile(filepath.Join(s.CertDir, "apiserver.key"), keyData, 0o640); err != nil { + return err + } + + s.SecureServing.CA = ca.CA.CertBytes() + + // service account signing files too + saCA, err := certs.NewTinyCA() + if err != nil { + return err + } + + saCert, saKey, err := saCA.CA.AsBytes() + if err != nil { + return err + } + + if err := os.WriteFile(filepath.Join(s.CertDir, saCertFile), saCert, 0o640); err != nil { + return err + } + return os.WriteFile(filepath.Join(s.CertDir, saKeyFile), saKey, 0o640) +} + +// Stop stops this process gracefully, waits for its termination, and cleans up +// the CertDir if necessary. +func (s *APIServer) Stop() error { + if s.processState != nil { + if s.processState.DirNeedsCleaning { + s.CertDir = "" // reset the directory if it was randomly allocated, so that we can safely restart + } + if err := s.processState.Stop(); err != nil { + return err + } + } + if s.Authn == nil { + return nil + } + return s.Authn.Stop() +} + +// APIServerDefaultArgs exposes the default args for the APIServer so that you +// can use those to append your own additional arguments. +// +// Note that these arguments don't handle newer API servers well to due the more +// complex feature detection neeeded. It's recommended that you switch to .Configure +// as you upgrade API server versions. +// +// Deprecated: use APIServer.Configure(). 
+var APIServerDefaultArgs = []string{ + "--advertise-address=127.0.0.1", + "--etcd-servers={{ if .EtcdURL }}{{ .EtcdURL.String }}{{ end }}", + "--cert-dir={{ .CertDir }}", + "--insecure-port={{ if .URL }}{{ .URL.Port }}{{else}}0{{ end }}", + "{{ if .URL }}--insecure-bind-address={{ .URL.Hostname }}{{ end }}", + "--secure-port={{ if .SecurePort }}{{ .SecurePort }}{{ end }}", + // we're keeping this disabled because if enabled, default SA is missing which would force all tests to create one + // in normal apiserver operation this SA is created by controller, but that is not run in integration environment + "--disable-admission-plugins=ServiceAccount", + "--service-cluster-ip-range=10.0.0.0/24", + "--allow-privileged=true", + // NB(directxman12): we also enable RBAC if nothing else was enabled +} + +// PrepareAPIServer is an internal-only (NEVER SHOULD BE EXPOSED) +// function that sets up the API server just before starting it, +// without actually starting it. This saves time on tests. +// +// NB(directxman12): do not expose this outside of internal -- it's unsafe to +// use, because things like port allocation could race even more than they +// currently do if you later call start! +func PrepareAPIServer(s *APIServer) error { + return s.prepare() +} + +// APIServerArguments is an internal-only (NEVER SHOULD BE EXPOSED) +// function that sets up the API server just before starting it, +// without actually starting it. It's public to make testing easier. +// +// NB(directxman12): do not expose this outside of internal. +func APIServerArguments(s *APIServer) []string { + return s.processState.Args +} diff --git a/pkg/internal/testing/controlplane/apiserver_test.go b/pkg/internal/testing/controlplane/apiserver_test.go new file mode 100644 index 0000000000..0811e9fb59 --- /dev/null +++ b/pkg/internal/testing/controlplane/apiserver_test.go @@ -0,0 +1,360 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane_test + +import ( + "crypto/x509" + "encoding/pem" + "errors" + "net" + "net/url" + "os" + "path" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/client-go/rest" + + . "sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +var _ = Describe("APIServer", func() { + var server *APIServer + BeforeEach(func() { + server = &APIServer{ + EtcdURL: &url.URL{}, + } + }) + JustBeforeEach(func() { + Expect(PrepareAPIServer(server)).To(Succeed()) + }) + Describe("setting up serving hosts & ports", func() { + Context("when URL is set", func() { + BeforeEach(func() { + server.URL = &url.URL{Scheme: "http", Host: "localhost:8675", Path: "/some-path"} + }) + + Context("when insecure serving is also set", func() { + BeforeEach(func() { + server.InsecureServing = &process.ListenAddr{ + Address: "localhost", + Port: "1234", + } + }) + + It("should override the existing insecure serving", func() { + Expect(server.InsecureServing).To(Equal(&process.ListenAddr{ + Address: "localhost", + Port: "8675", + })) + }) + }) + + It("should set insecure serving off of that", func() { + Expect(server.InsecureServing).To(Equal(&process.ListenAddr{ + Address: "localhost", + Port: "8675", + })) + }) + + It("should keep URL as-is", func() { + Expect(server.URL.String()).To(Equal("http://localhost:8675/some-path")) + }) 
+ }) + + Context("when URL is not set but InsecureServing is set", func() { + BeforeEach(func() { + server.InsecureServing = &process.ListenAddr{} + }) + + Context("when host and port are set", func() { + BeforeEach(func() { + server.InsecureServing.Address = "localhost" + server.InsecureServing.Port = "8675" + }) + It("should set URL from InsecureServing", func() { + Expect(server.URL.String()).To(Equal("http://localhost:8675")) + }) + + It("should leave InsecureServing as-is if address and port are filled out", func() { + Expect(server.InsecureServing).To(Equal(&process.ListenAddr{ + Address: "localhost", + Port: "8675", + })) + }) + }) + + Context("when address and port are not filled out", func() { + BeforeEach(func() { + server.InsecureServing = &process.ListenAddr{} + }) + It("should default an insecure port", func() { + Expect(server.InsecureServing.Port).NotTo(BeEmpty()) + }) + It("should set URL from InsecureServing", func() { + Expect(server.URL.String()).To(Equal("http://" + server.InsecureServing.Address + ":" + server.InsecureServing.Port)) + }) + }) + }) + + Context("when neither URL or InsecureServing are set", func() { + It("should not default either of them", func() { + Expect(server.URL).To(BeNil(), "no URL should be set") + Expect(server.InsecureServing).To(BeNil(), "no insecure serving details should be set") + }) + }) + + Context("when SecureServing host & port are set", func() { + BeforeEach(func() { + server.Address = "localhost" + server.Port = "8675" + }) + + It("should leave SecureServing as-is", func() { + Expect(server.SecureServing.Address).To(Equal("localhost")) + Expect(server.SecureServing.Port).To(Equal("8675")) + }) + }) + + Context("when SecureServing is not set", func() { + It("should be defaulted with a random port", func() { + Expect(server.Port).NotTo(BeEquivalentTo(0)) + }) + }) + }) + + It("should default authn if not set", func() { + Expect(server.Authn).NotTo(BeNil()) + }) + + Describe("argument defaulting", func() { + // 
NB(directxman12): most of the templating vs configure logic is tested + // in arguments/arguments_test.go, so just test secure vs insecure port logic here + + Context("when insecure serving is set, on a binary that supports it", func() { + BeforeEach(func() { + server.InsecureServing = &process.ListenAddr{ + Address: "localhost", + Port: "8675", + } + server.Path = "./testdata/fake-1.19-apiserver.sh" + }) + It("should set the insecure-port and insecure-bind-address fields from insecureserving", func() { + Expect(APIServerArguments(server)).To(ContainElements( + "--insecure-port=8675", + "--insecure-bind-address=localhost", + )) + }) + }) + + Context("when insecureserving is disabled, on binaries with no insecure-port flag", func() { + BeforeEach(func() { + server.Path = "./testdata/fake-1.20-apiserver.sh" + }) + It("should not try to explicitly disable the insecure port", func() { + Expect(APIServerArguments(server)).NotTo(ContainElement(HavePrefix("--insecure-port"))) + }) + }) + + Context("when insecureserving is disabled, on binaries with an insecure-port flag", func() { + BeforeEach(func() { + server.Path = "./testdata/fake-1.19-apiserver.sh" + }) + It("should explicitly disable the insecure port", func() { + Expect(APIServerArguments(server)).To(ContainElement("--insecure-port=0")) + }) + }) + + Context("when given legacy-style template arguments", func() { + BeforeEach(func() { + server.Args = []string{"--foo=bar", "--baz={{ .Port }}"} + }) + It("should use the passed in args with the minimal required defaults", func() { + Expect(APIServerArguments(server)).To(ConsistOf( + "--foo=bar", + MatchRegexp(`--baz=\d+`), + "--service-cluster-ip-range=10.0.0.0/24", + MatchRegexp("--client-ca-file=.+"), + "--authorization-mode=RBAC", + )) + }) + }) + }) + + // These tests assume that 'localhost' resolves to 127.0.0.1. It can resolve + // to other addresses as well (e.g. ::1 on IPv6), but it must always resolve + // to 127.0.0.1. 
+ Describe(("generated certificates"), func() { + getCertificate := func() *x509.Certificate { + // Read the cert file + certFile := path.Join(server.CertDir, "apiserver.crt") + certBytes, err := os.ReadFile(certFile) + Expect(err).NotTo(HaveOccurred(), "should be able to read the cert file") + + // Decode and parse it + block, remainder := pem.Decode(certBytes) + Expect(block).NotTo(BeNil(), "should be able to decode the cert file") + Expect(remainder).To(BeEmpty(), "should not have any extra data in the cert file") + Expect(block.Type).To(Equal("CERTIFICATE"), "should be a certificate block") + + cert, err := x509.ParseCertificate(block.Bytes) + Expect(err).NotTo(HaveOccurred(), "should be able to parse the cert file") + + return cert + } + + Context("when SecureServing are not set", func() { + It("should have localhost/127.0.0.1 in the certificate altnames", func() { + cert := getCertificate() + + Expect(cert.Subject.CommonName).To(Equal("localhost")) + Expect(cert.DNSNames).To(ConsistOf("localhost")) + expectedIPAddresses := []net.IP{ + net.ParseIP("127.0.0.1").To4(), + net.ParseIP(server.SecureServing.ListenAddr.Address).To4(), + } + Expect(cert.IPAddresses).To(ContainElements(expectedIPAddresses)) + }) + }) + + Context("when SecureServing host & port are set", func() { + BeforeEach(func() { + server.SecureServing = SecureServing{ + ListenAddr: process.ListenAddr{ + Address: "1.2.3.4", + Port: "5678", + }, + } + }) + + It("should have the host in the certificate altnames", func() { + cert := getCertificate() + + Expect(cert.Subject.CommonName).To(Equal("localhost")) + Expect(cert.DNSNames).To(ConsistOf("localhost")) + expectedIPAddresses := []net.IP{ + net.ParseIP("127.0.0.1").To4(), + net.ParseIP(server.SecureServing.ListenAddr.Address).To4(), + } + Expect(cert.IPAddresses).To(ContainElements(expectedIPAddresses)) + }) + }) + }) + + Describe("setting up auth", func() { + var auth *fakeAuthn + BeforeEach(func() { + auth = &fakeAuthn{ + setFlag: true, + } + 
server.Authn = auth + }) + It("should configure with the cert dir", func() { + Expect(auth.workDir).To(Equal(server.CertDir)) + }) + It("should pass its args to be configured", func() { + Expect(server.Configure().Get("configure-called").Get(nil)).To(ConsistOf("true")) + }) + + Context("when configuring auth errors out", func() { + It("should fail to configure", func() { + server := &APIServer{ + EtcdURL: &url.URL{}, + SecureServing: SecureServing{ + Authn: auth, + }, + } + auth.configureErr = errors.New("Oh no") + Expect(PrepareAPIServer(server)).NotTo(Succeed()) + }) + }) + }) + + Describe("managing", func() { + // some of these tests are combined for speed reasons -- starting the apiserver + // takes a while, relatively speaking + + var ( + auth *fakeAuthn + etcd *Etcd + ) + BeforeEach(func() { + etcd = &Etcd{} + Expect(etcd.Start()).To(Succeed()) + server.EtcdURL = etcd.URL + + auth = &fakeAuthn{} + server.Authn = auth + }) + AfterEach(func() { + Expect(etcd.Stop()).To(Succeed()) + }) + + Context("after starting", func() { + BeforeEach(func() { + Expect(server.Start()).To(Succeed()) + }) + + It("should stop successfully, and stop auth", func() { + Expect(server.Stop()).To(Succeed()) + Expect(auth.stopCalled).To(BeTrue()) + }) + }) + + It("should fail to start when auth fails to start", func() { + auth.startErr = errors.New("Oh no") + Expect(server.Start()).NotTo(Succeed()) + }) + + It("should start successfully & start auth", func() { + Expect(server.Start()).To(Succeed()) + defer func() { Expect(server.Stop()).To(Succeed()) }() + Expect(auth.startCalled).To(BeTrue()) + }) + }) +}) + +type fakeAuthn struct { + workDir string + + startCalled bool + stopCalled bool + setFlag bool + + configureErr error + startErr error +} + +func (f *fakeAuthn) Configure(workDir string, args *process.Arguments) error { + f.workDir = workDir + if f.setFlag { + args.Set("configure-called", "true") + } + return f.configureErr +} +func (f *fakeAuthn) Start() error { + f.startCalled = 
true + return f.startErr +} +func (f *fakeAuthn) AddUser(user User, baseCfg *rest.Config) (*rest.Config, error) { + return nil, nil +} +func (f *fakeAuthn) Stop() error { + f.stopCalled = true + return nil +} diff --git a/pkg/internal/testing/controlplane/auth.go b/pkg/internal/testing/controlplane/auth.go new file mode 100644 index 0000000000..b44035ebf2 --- /dev/null +++ b/pkg/internal/testing/controlplane/auth.go @@ -0,0 +1,142 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "fmt" + "os" + "path/filepath" + + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +// User represents a Kubernetes user. +type User struct { + // Name is the user's Name. + Name string + // Groups are the groups to which the user belongs. + Groups []string +} + +// Authn knows how to configure an API server for a particular type of authentication, +// and provision users under that authentication scheme. +// +// The methods must be called in the following order (as presented below in the interface +// for a mnemonic): +// +// 1. Configure +// 2. Start +// 3. AddUsers (0+ calls) +// 4. Stop. +type Authn interface { + // Configure provides the working directory to this authenticator, + // and configures the given API server arguments to make use of this authenticator. + // + // Should be called first. 
+	Configure(workDir string, args *process.Arguments) error
+	// Start runs this authenticator. Will be called just before API server start.
+	//
+	// Must be called after Configure.
+	Start() error
+	// AddUser provisions a user, returning a copy of the given base rest.Config
+	// configured to authenticate as that user.
+	//
+	// May only be called while the authenticator is "running".
+	AddUser(user User, baseCfg *rest.Config) (*rest.Config, error)
+	// Stop shuts down this authenticator.
+	Stop() error
+}
+
+// CertAuthn is an authenticator (Authn) that makes use of client certificate authn.
+type CertAuthn struct {
+	// ca is the CA used to sign the client certs
+	ca *certs.TinyCA
+	// certDir is the directory used to write the CA crt file
+	// so that the API server can read it.
+	certDir string
+}
+
+// NewCertAuthn creates a new client-cert-based Authn with a new CA.
+func NewCertAuthn() (*CertAuthn, error) {
+	ca, err := certs.NewTinyCA()
+	if err != nil {
+		return nil, fmt.Errorf("unable to provision client certificate auth CA: %w", err)
+	}
+	return &CertAuthn{
+		ca: ca,
+	}, nil
+}
+
+// AddUser provisions a new user that's authenticated via certificates, with
+// the given username and groups embedded in the certificate as expected by the
+// API server.
+func (c *CertAuthn) AddUser(user User, baseCfg *rest.Config) (*rest.Config, error) {
+	certs, err := c.ca.NewClientCert(certs.ClientInfo{
+		Name:   user.Name,
+		Groups: user.Groups,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("unable to create client certificates for %s: %w", user.Name, err)
+	}
+
+	crt, key, err := certs.AsBytes()
+	if err != nil {
+		return nil, fmt.Errorf("unable to serialize client certificates for %s: %w", user.Name, err)
+	}
+
+	cfg := rest.CopyConfig(baseCfg)
+	cfg.CertData = crt
+	cfg.KeyData = key
+
+	return cfg, nil
+}
+
+// caCrtPath returns the path to the on-disk client-cert CA crt file.
+func (c *CertAuthn) caCrtPath() string { + return filepath.Join(c.certDir, "client-cert-auth-ca.crt") +} + +// Configure provides the working directory to this authenticator, +// and configures the given API server arguments to make use of this authenticator. +func (c *CertAuthn) Configure(workDir string, args *process.Arguments) error { + c.certDir = workDir + args.Set("client-ca-file", c.caCrtPath()) + return nil +} + +// Start runs this authenticator. Will be called just before API server start. +// +// Must be called after Configure. +func (c *CertAuthn) Start() error { + if len(c.certDir) == 0 { + return fmt.Errorf("start called before configure") + } + caCrt := c.ca.CA.CertBytes() + if err := os.WriteFile(c.caCrtPath(), caCrt, 0640); err != nil { + return fmt.Errorf("unable to save the client certificate CA to %s: %w", c.caCrtPath(), err) + } + + return nil +} + +// Stop shuts down this authenticator. +func (c *CertAuthn) Stop() error { + // no-op -- our workdir is cleaned up for us automatically + return nil +} diff --git a/pkg/internal/testing/controlplane/auth_test.go b/pkg/internal/testing/controlplane/auth_test.go new file mode 100644 index 0000000000..9891c6f2e2 --- /dev/null +++ b/pkg/internal/testing/controlplane/auth_test.go @@ -0,0 +1,175 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane_test + +import ( + "crypto/tls" + "crypto/x509" + "os" + "path/filepath" + + . 
"github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/client-go/rest" + kcert "k8s.io/client-go/util/cert" + + cp "sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +var _ = Describe("Cert Authentication", func() { + var authn *cp.CertAuthn + BeforeEach(func() { + var err error + authn, err = cp.NewCertAuthn() + Expect(err).NotTo(HaveOccurred(), "should be able to create the cert authn") + }) + Context("when starting", func() { + It("should write the verifying CA to the configured directory", func() { + By("setting up a temp dir") + dir, err := os.MkdirTemp("", "envtest_controlplane_*") + Expect(err).NotTo(HaveOccurred(), "should be able to provision a temp dir") + if dir != "" { + defer os.RemoveAll(dir) + } + + By("configuring to use that dir") + Expect(authn.Configure(dir, process.EmptyArguments())).To(Succeed()) + + By("starting and checking the dir") + Expect(authn.Start()).To(Succeed()) + defer func() { Expect(authn.Stop()).To(Succeed()) }() // not strictly necessary, but future-proof + + _, err = os.Stat(filepath.Join(dir, "client-cert-auth-ca.crt")) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should error out if we haven't been configured yet", func() { + // NB(directxman12): no configure here intentionally + Expect(authn.Start()).NotTo(Succeed()) + }) + }) + Context("when configuring", func() { + It("should have set up the API server to use the written file for client cert auth", func() { + args := process.EmptyArguments() + Expect(authn.Configure("/tmp/____doesnotexist", args)).To(Succeed()) + Expect(args.Get("client-ca-file").Get(nil)).To(ConsistOf("/tmp/____doesnotexist/client-cert-auth-ca.crt")) + }) + }) + + Describe("creating users", func() { + user := cp.User{Name: "someuser", Groups: []string{"group1", "group2"}} + + Context("before starting", func() { + It("should yield a REST config that contains certs valid for the 
to-be-written CA", func() { + cfg, err := authn.AddUser(user, &rest.Config{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + Expect(cfg.CertData).NotTo(BeEmpty()) + Expect(cfg.KeyData).NotTo(BeEmpty()) + + // double-check the cert (assume the key is fine if it's present + // and the cert is also present, cause it's more annoying to verify + // and we have separate tinyca & integration tests. + By("parsing the config's cert & key data") + certs, err := tls.X509KeyPair(cfg.CertData, cfg.KeyData) + Expect(err).NotTo(HaveOccurred(), "config cert/key data should be valid key pair") + cert, err := x509.ParseCertificate(certs.Certificate[0]) // re-parse cause .Leaf isn't saved + Expect(err).NotTo(HaveOccurred()) + + By("starting and loading the CA cert") + dir, err := os.MkdirTemp("", "envtest_controlplane_*") + Expect(err).NotTo(HaveOccurred(), "should be able to provision a temp dir") + if dir != "" { + defer os.RemoveAll(dir) + } + Expect(authn.Configure(dir, process.EmptyArguments())).To(Succeed()) + Expect(authn.Start()).To(Succeed()) + caCerts, err := kcert.CertsFromFile(filepath.Join(dir, "client-cert-auth-ca.crt")) + Expect(err).NotTo(HaveOccurred(), "should be able to read the CA cert file))))") + Expect(cert.CheckSignatureFrom(caCerts[0])).To(Succeed(), "the config's cert should be signed by the written CA") + }) + + It("should copy the configuration from the base CA without modifying it", func() { + By("creating a user and checking the output config") + base := &rest.Config{Burst: 30} + cfg, err := authn.AddUser(user, base) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + Expect(cfg.Burst).To(Equal(30)) + + By("mutating the base and verifying the cfg doesn't change") + base.Burst = 8675 + Expect(cfg.Burst).To(Equal(30)) + }) + }) + + Context("after starting", func() { + var dir string + BeforeEach(func() { + By("setting up a temp dir & starting with it") + var err error + dir, err = os.MkdirTemp("", 
"envtest_controlplane_*") + Expect(err).NotTo(HaveOccurred(), "should be able to provision a temp dir") + Expect(authn.Configure(dir, process.EmptyArguments())).To(Succeed()) + Expect(authn.Start()).To(Succeed()) + }) + AfterEach(func() { + if dir != "" { + defer os.RemoveAll(dir) + } + }) + + It("should yield a REST config that contains certs valid for the written CA", func() { + cfg, err := authn.AddUser(user, &rest.Config{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + Expect(cfg.CertData).NotTo(BeEmpty()) + Expect(cfg.KeyData).NotTo(BeEmpty()) + + // double-check the cert (assume the key is fine if it's present + // and the cert is also present, cause it's more annoying to verify + // and we have separate tinyca & integration tests. + By("parsing the config's cert & key data") + certs, err := tls.X509KeyPair(cfg.CertData, cfg.KeyData) + Expect(err).NotTo(HaveOccurred(), "config cert/key data should be valid key pair") + cert, err := x509.ParseCertificate(certs.Certificate[0]) // re-parse cause .Leaf isn't saved + Expect(err).NotTo(HaveOccurred()) + + By("loading the CA cert") + caCerts, err := kcert.CertsFromFile(filepath.Join(dir, "client-cert-auth-ca.crt")) + Expect(err).NotTo(HaveOccurred(), "should be able to read the CA cert file))))") + Expect(cert.CheckSignatureFrom(caCerts[0])).To(Succeed(), "the config's cert should be signed by the written CA") + }) + + It("should copy the configuration from the base CA without modifying it", func() { + By("creating a user and checking the output config") + base := &rest.Config{Burst: 30} + cfg, err := authn.AddUser(user, base) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + Expect(cfg.Burst).To(Equal(30)) + + By("mutating the base and verifying the cfg doesn't change") + base.Burst = 8675 + Expect(cfg.Burst).To(Equal(30)) + }) + }) + }) +}) diff --git a/pkg/internal/testing/controlplane/controlplane_suite_test.go b/pkg/internal/testing/controlplane/controlplane_suite_test.go 
new file mode 100644 index 0000000000..9ac69047f0 --- /dev/null +++ b/pkg/internal/testing/controlplane/controlplane_suite_test.go @@ -0,0 +1,30 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestIntegration(t *testing.T) { + t.Parallel() + RegisterFailHandler(Fail) + RunSpecs(t, "Control Plane Standup Unit Tests") +} diff --git a/pkg/internal/testing/controlplane/etcd.go b/pkg/internal/testing/controlplane/etcd.go new file mode 100644 index 0000000000..98ffe3ac5e --- /dev/null +++ b/pkg/internal/testing/controlplane/etcd.go @@ -0,0 +1,206 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package controlplane
+
+import (
+	"io"
+	"net"
+	"net/url"
+	"strconv"
+	"time"
+
+	"sigs.k8s.io/controller-runtime/pkg/internal/testing/addr"
+	"sigs.k8s.io/controller-runtime/pkg/internal/testing/process"
+)
+
+// Etcd knows how to run an etcd server.
+type Etcd struct {
+	// URL is the address the Etcd should listen on for client connections.
+	//
+	// If this is not specified, we default to a random free port on localhost.
+	URL *url.URL
+
+	// Path is the path to the etcd binary.
+	//
+	// If this is left as the empty string, we will attempt to locate a binary,
+	// by checking for the TEST_ASSET_ETCD environment variable, and the default
+	// test assets directory. See the "Binaries" section above (in doc.go) for
+	// details.
+	Path string
+
+	// Args is a list of arguments which will be passed to the Etcd binary. Before
+	// they are passed on, they will be evaluated as go-template strings. This
+	// means you can use fields which are defined and exported on this Etcd
+	// struct (e.g. "--data-dir={{ .Dir }}").
+	// Those templates will be evaluated after the defaulting of the Etcd's
+	// fields has already happened and just before the binary actually gets
+	// started. Thus you have access to calculated fields like `URL` and others.
+	//
+	// If not specified, the minimal set of arguments to run the Etcd will be
+	// used.
+	//
+	// They will be loaded into the same argument set as Configure. Each flag
+	// will be Append-ed to the configured arguments just before launch.
+	//
+	// Deprecated: use Configure instead.
+	Args []string
+
+	// DataDir is a path to a directory in which etcd can store its state.
+	//
+	// If left unspecified, then the Start() method will create a fresh temporary
+	// directory, and the Stop() method will clean it up.
+	DataDir string
+
+	// StartTimeout, StopTimeout specify the time the Etcd is allowed to
+	// take when starting and stopping before an error is emitted.
+ // + // If not specified, these default to 20 seconds. + StartTimeout time.Duration + StopTimeout time.Duration + + // Out, Err specify where Etcd should write its StdOut, StdErr to. + // + // If not specified, the output will be discarded. + Out io.Writer + Err io.Writer + + // processState contains the actual details about this running process + processState *process.State + + // args contains the structured arguments to use for running etcd. + // Lazily initialized by .Configure(), Defaulted eventually with .defaultArgs() + args *process.Arguments + + // listenPeerURL is the address the Etcd should listen on for peer connections. + // It's automatically generated and a random port is picked during execution. + listenPeerURL *url.URL +} + +// Start starts the etcd, waits for it to come up, and returns an error, if one +// occurred. +func (e *Etcd) Start() error { + if err := e.setProcessState(); err != nil { + return err + } + return e.processState.Start(e.Out, e.Err) +} + +func (e *Etcd) setProcessState() error { + e.processState = &process.State{ + Dir: e.DataDir, + Path: e.Path, + StartTimeout: e.StartTimeout, + StopTimeout: e.StopTimeout, + } + + // unconditionally re-set this so we can successfully restart + // TODO(directxman12): we supported this in the past, but do we actually + // want to support re-using an API server object to restart? The loss + // of provisioned users is surprising to say the least. + if err := e.processState.Init("etcd"); err != nil { + return err + } + + // Set the listen url. + if e.URL == nil { + port, host, err := addr.Suggest("") + if err != nil { + return err + } + e.URL = &url.URL{ + Scheme: "http", + Host: net.JoinHostPort(host, strconv.Itoa(port)), + } + } + + // Set the listen peer URL. 
+ { + port, host, err := addr.Suggest("") + if err != nil { + return err + } + e.listenPeerURL = &url.URL{ + Scheme: "http", + Host: net.JoinHostPort(host, strconv.Itoa(port)), + } + } + + // can use /health as of etcd 3.3.0 + e.processState.HealthCheck.URL = *e.URL + e.processState.HealthCheck.Path = "/health" + + e.DataDir = e.processState.Dir + e.Path = e.processState.Path + e.StartTimeout = e.processState.StartTimeout + e.StopTimeout = e.processState.StopTimeout + + var err error + e.processState.Args, e.Args, err = process.TemplateAndArguments(e.Args, e.Configure(), process.TemplateDefaults{ //nolint:staticcheck + Data: e, + Defaults: e.defaultArgs(), + }) + return err +} + +// Stop stops this process gracefully, waits for its termination, and cleans up +// the DataDir if necessary. +func (e *Etcd) Stop() error { + if e.processState == nil { + return nil + } + + if e.processState.DirNeedsCleaning { + e.DataDir = "" // reset the directory if it was randomly allocated, so that we can safely restart + } + return e.processState.Stop() +} + +func (e *Etcd) defaultArgs() map[string][]string { + args := map[string][]string{ + "listen-peer-urls": {e.listenPeerURL.String()}, + "data-dir": {e.DataDir}, + } + if e.URL != nil { + args["advertise-client-urls"] = []string{e.URL.String()} + args["listen-client-urls"] = []string{e.URL.String()} + } + + // Add unsafe no fsync, available from etcd 3.5 + if ok, _ := e.processState.CheckFlag("unsafe-no-fsync"); ok { + args["unsafe-no-fsync"] = []string{"true"} + } + return args +} + +// Configure returns Arguments that may be used to customize the +// flags used to launch etcd. A set of defaults will +// be applied underneath. +func (e *Etcd) Configure() *process.Arguments { + if e.args == nil { + e.args = process.EmptyArguments() + } + return e.args +} + +// EtcdDefaultArgs exposes the default args for Etcd so that you +// can use those to append your own additional arguments. 
+var EtcdDefaultArgs = []string{ + "--listen-peer-urls=http://localhost:0", + "--advertise-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}", + "--listen-client-urls={{ if .URL }}{{ .URL.String }}{{ end }}", + "--data-dir={{ .DataDir }}", +} diff --git a/pkg/internal/testing/controlplane/etcd_test.go b/pkg/internal/testing/controlplane/etcd_test.go new file mode 100644 index 0000000000..7c7c7561ff --- /dev/null +++ b/pkg/internal/testing/controlplane/etcd_test.go @@ -0,0 +1,36 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + . "sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" +) + +var _ = Describe("etcd", func() { + // basic coherence test + It("should start and stop successfully", func() { + etcd := &Etcd{} + Expect(etcd.Start()).To(Succeed()) + defer func() { + Expect(etcd.Stop()).To(Succeed()) + }() + Expect(etcd.URL).NotTo(BeNil()) + }) +}) diff --git a/pkg/internal/testing/controlplane/kubectl.go b/pkg/internal/testing/controlplane/kubectl.go new file mode 100644 index 0000000000..a41bb77c4d --- /dev/null +++ b/pkg/internal/testing/controlplane/kubectl.go @@ -0,0 +1,120 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "bytes" + "fmt" + "io" + "net/url" + "os/exec" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + kcapi "k8s.io/client-go/tools/clientcmd/api" + + "sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +const ( + envtestName = "envtest" +) + +// KubeConfigFromREST reverse-engineers a kubeconfig file from a rest.Config. +// The options are tailored towards the rest.Configs we generate, so they're +// not broadly applicable. +// +// This is not intended to be exposed beyond internal for the above reasons. +func KubeConfigFromREST(cfg *rest.Config) ([]byte, error) { + kubeConfig := kcapi.NewConfig() + protocol := "https" + if !rest.IsConfigTransportTLS(*cfg) { + protocol = "http" + } + + // cfg.Host is a URL, so we need to parse it so we can properly append the API path + baseURL, err := url.Parse(cfg.Host) + if err != nil { + return nil, fmt.Errorf("unable to interpret config's host value as a URL: %w", err) + } + + kubeConfig.Clusters[envtestName] = &kcapi.Cluster{ + // TODO(directxman12): if client-go ever decides to expose defaultServerUrlFor(config), + // we can just use that. Note that this is not the same as the public DefaultServerURL, + // which requires us to pass a bunch of stuff in manually. 
+ Server: (&url.URL{Scheme: protocol, Host: baseURL.Host, Path: cfg.APIPath}).String(), + CertificateAuthorityData: cfg.CAData, + } + kubeConfig.AuthInfos[envtestName] = &kcapi.AuthInfo{ + // try to cover all auth strategies that aren't plugins + ClientCertificateData: cfg.CertData, + ClientKeyData: cfg.KeyData, + Token: cfg.BearerToken, + Username: cfg.Username, + Password: cfg.Password, + } + kcCtx := kcapi.NewContext() + kcCtx.Cluster = envtestName + kcCtx.AuthInfo = envtestName + kubeConfig.Contexts[envtestName] = kcCtx + kubeConfig.CurrentContext = envtestName + + contents, err := clientcmd.Write(*kubeConfig) + if err != nil { + return nil, fmt.Errorf("unable to serialize kubeconfig file: %w", err) + } + return contents, nil +} + +// KubeCtl is a wrapper around the kubectl binary. +type KubeCtl struct { + // Path where the kubectl binary can be found. + // + // If this is left empty, we will attempt to locate a binary, by checking for + // the TEST_ASSET_KUBECTL environment variable, and the default test assets + // directory. See the "Binaries" section above (in doc.go) for details. + Path string + + // Opts can be used to configure additional flags which will be used each + // time the wrapped binary is called. + // + // For example, you might want to use this to set the URL of the APIServer to + // connect to. + Opts []string +} + +// Run executes the wrapped binary with some preconfigured options and the +// arguments given to this method. It returns Readers for the stdout and +// stderr. +func (k *KubeCtl) Run(args ...string) (stdout, stderr io.Reader, err error) { + if k.Path == "" { + k.Path = process.BinPathFinder("kubectl", "") + } + + stdoutBuffer := &bytes.Buffer{} + stderrBuffer := &bytes.Buffer{} + allArgs := append(k.Opts, args...) + + cmd := exec.Command(k.Path, allArgs...) 
+ cmd.Stdout = stdoutBuffer + cmd.Stderr = stderrBuffer + cmd.SysProcAttr = process.GetSysProcAttr() + + err = cmd.Run() + + return stdoutBuffer, stderrBuffer, err +} diff --git a/pkg/internal/testing/controlplane/kubectl_test.go b/pkg/internal/testing/controlplane/kubectl_test.go new file mode 100644 index 0000000000..5484bc31a1 --- /dev/null +++ b/pkg/internal/testing/controlplane/kubectl_test.go @@ -0,0 +1,138 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane_test + +import ( + "io" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/gstruct" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + ccapi "k8s.io/client-go/tools/clientcmd/api" + + . "sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" +) + +var _ = Describe("Kubectl", func() { + It("runs kubectl", func() { + k := &KubeCtl{Path: "bash"} + args := []string{"-c", "echo 'something'"} + stdout, stderr, err := k.Run(args...) 
+ Expect(err).NotTo(HaveOccurred()) + Expect(stdout).To(ContainSubstring("something")) + bytes, err := io.ReadAll(stderr) + Expect(err).NotTo(HaveOccurred()) + Expect(bytes).To(BeEmpty()) + }) + + Context("when the command returns a non-zero exit code", func() { + It("returns an error", func() { + k := &KubeCtl{Path: "bash"} + args := []string{ + "-c", "echo 'this is StdErr' >&2; echo 'but this is StdOut' >&1; exit 66", + } + + stdout, stderr, err := k.Run(args...) + + Expect(err).To(MatchError(ContainSubstring("exit status 66"))) + + Expect(stdout).To(ContainSubstring("but this is StdOut")) + Expect(stderr).To(ContainSubstring("this is StdErr")) + }) + }) +}) + +var _ = Describe("KubeConfigFromREST", func() { + var ( + restCfg *rest.Config + rawCfg []byte + cfg *ccapi.Config + ) + + BeforeEach(func() { + restCfg = &rest.Config{ + Host: "https://some-host:8675", + APIPath: "/some-prefix", + TLSClientConfig: rest.TLSClientConfig{ + CertData: []byte("cert"), + KeyData: []byte("key"), + CAData: []byte("ca-cert"), + }, + BearerToken: "some-tok", + Username: "some-user", + Password: "some-password", + } + }) + + JustBeforeEach(func() { + var err error + rawCfg, err = KubeConfigFromREST(restCfg) + Expect(err).NotTo(HaveOccurred(), "should be able to convert & serialize the kubeconfig") + + cfg, err = clientcmd.Load(rawCfg) + Expect(err).NotTo(HaveOccurred(), "should be able to deserialize the generated kubeconfig") + }) + + It("should set up a context, and set it as the current one", func() { + By("checking that the current context exists") + Expect(cfg.CurrentContext).NotTo(BeEmpty(), "should have a current context") + Expect(cfg.Contexts).To(HaveKeyWithValue(cfg.CurrentContext, Not(BeNil())), "the current context should exist as a context") + + By("checking that it points to valid info") + currCtx := cfg.Contexts[cfg.CurrentContext] + Expect(currCtx).To(PointTo(MatchFields(IgnoreExtras, Fields{ + "Cluster": Not(BeEmpty()), + "AuthInfo": Not(BeEmpty()), + }))) + + 
Expect(cfg.Clusters).To(HaveKeyWithValue(currCtx.Cluster, Not(BeNil())), "should point to a cluster") + Expect(cfg.AuthInfos).To(HaveKeyWithValue(currCtx.AuthInfo, Not(BeNil())), "should point to a user") + }) + + Context("when no TLS is enabled", func() { + BeforeEach(func() { + restCfg.Host = "http://some-host:8675" + restCfg.TLSClientConfig = rest.TLSClientConfig{} + }) + + It("should use http in the server url", func() { + cluster := cfg.Clusters[cfg.Contexts[cfg.CurrentContext].Cluster] + Expect(cluster.Server).To(HavePrefix("http://")) + }) + }) + + It("configure the current context to point to the given REST config's server, with CA data", func() { + cluster := cfg.Clusters[cfg.Contexts[cfg.CurrentContext].Cluster] + Expect(cluster).To(PointTo(MatchFields(IgnoreExtras, Fields{ + "Server": Equal("https://some-host:8675/some-prefix"), + "CertificateAuthorityData": Equal([]byte("ca-cert")), + }))) + }) + + It("should copy all non-plugin auth info over", func() { + user := cfg.AuthInfos[cfg.Contexts[cfg.CurrentContext].AuthInfo] + Expect(user).To(PointTo(MatchFields(IgnoreExtras, Fields{ + "ClientCertificateData": Equal([]byte("cert")), + "ClientKeyData": Equal([]byte("key")), + "Token": Equal("some-tok"), + "Username": Equal("some-user"), + "Password": Equal("some-password"), + }))) + }) +}) diff --git a/pkg/internal/testing/controlplane/plane.go b/pkg/internal/testing/controlplane/plane.go new file mode 100644 index 0000000000..456183a7a3 --- /dev/null +++ b/pkg/internal/testing/controlplane/plane.go @@ -0,0 +1,259 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "fmt" + "net/url" + "os" + + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/certs" +) + +// NewTinyCA creates a new a tiny CA utility for provisioning serving certs and client certs FOR TESTING ONLY. +// Don't use this for anything else! +var NewTinyCA = certs.NewTinyCA + +// ControlPlane is a struct that knows how to start your test control plane. +// +// Right now, that means Etcd and your APIServer. This is likely to increase in +// future. +type ControlPlane struct { + APIServer *APIServer + Etcd *Etcd + + // Kubectl will override the default asset search path for kubectl + KubectlPath string + + // for the deprecated methods (Kubectl, etc) + defaultUserCfg *rest.Config + defaultUserKubectl *KubeCtl +} + +// Start will start your control plane processes. To stop them, call Stop(). +func (f *ControlPlane) Start() (retErr error) { + if f.Etcd == nil { + f.Etcd = &Etcd{} + } + if err := f.Etcd.Start(); err != nil { + return err + } + defer func() { + if retErr != nil { + _ = f.Etcd.Stop() + } + }() + + if f.APIServer == nil { + f.APIServer = &APIServer{} + } + f.APIServer.EtcdURL = f.Etcd.URL + if err := f.APIServer.Start(); err != nil { + return err + } + defer func() { + if retErr != nil { + _ = f.APIServer.Stop() + } + }() + + // provision the default user -- can be removed when the related + // methods are removed. The default user has admin permissions to + // mimic legacy no-authz setups. 
+ user, err := f.AddUser(User{Name: "default", Groups: []string{"system:masters"}}, &rest.Config{}) + if err != nil { + return fmt.Errorf("unable to provision the default (legacy) user: %w", err) + } + kubectl, err := user.Kubectl() + if err != nil { + return fmt.Errorf("unable to provision the default (legacy) kubeconfig: %w", err) + } + f.defaultUserCfg = user.Config() + f.defaultUserKubectl = kubectl + return nil +} + +// Stop will stop your control plane processes, and clean up their data. +func (f *ControlPlane) Stop() error { + var errList []error + + if f.APIServer != nil { + if err := f.APIServer.Stop(); err != nil { + errList = append(errList, err) + } + } + + if f.Etcd != nil { + if err := f.Etcd.Stop(); err != nil { + errList = append(errList, err) + } + } + + return kerrors.NewAggregate(errList) +} + +// APIURL returns the URL you should connect to to talk to your API server. +// +// If insecure serving is configured, this will contain the insecure port. +// Otherwise, it will contain the secure port. +// +// Deprecated: use AddUser instead, or APIServer.{Ins|S}ecureServing.URL if +// you really want just the URL. +func (f *ControlPlane) APIURL() *url.URL { + return f.APIServer.URL +} + +// KubeCtl returns a pre-configured KubeCtl, ready to connect to this +// ControlPlane. +// +// Deprecated: use AddUser & AuthenticatedUser.Kubectl instead. +func (f *ControlPlane) KubeCtl() *KubeCtl { + return f.defaultUserKubectl +} + +// RESTClientConfig returns a pre-configured restconfig, ready to connect to +// this ControlPlane. +// +// Deprecated: use AddUser & AuthenticatedUser.Config instead. +func (f *ControlPlane) RESTClientConfig() (*rest.Config, error) { + return f.defaultUserCfg, nil +} + +// AuthenticatedUser contains access information for an provisioned user, +// including REST config, kubeconfig contents, and access to a KubeCtl instance. 
+// +// It's not "safe" to use the methods on this till after the API server has been +// started (due to certificate initialization and such). The various methods will +// panic if this is done. +type AuthenticatedUser struct { + // cfg is the rest.Config for connecting to the API server. It's lazily initialized. + cfg *rest.Config + // cfgIsComplete indicates the cfg has had late-initialized fields (e.g. + // API server CA data) initialized. + cfgIsComplete bool + + // apiServer is a handle to the APIServer that's used when finalizing cfg + // and producing the kubectl instance. + plane *ControlPlane + + // kubectl is our existing, provisioned kubectl. We don't provision one + // till someone actually asks for it. + kubectl *KubeCtl +} + +// Config returns the REST config that can be used to connect to the API server +// as this user. +// +// Will panic if used before the API server is started. +func (u *AuthenticatedUser) Config() *rest.Config { + // NB(directxman12): we choose to panic here for ergonomics sake, and because there's + // not really much you can do to "handle" this error. This machinery is intended to be + // used in tests anyway, so panicing is not a particularly big deal. + if u.cfgIsComplete { + return u.cfg + } + if len(u.plane.APIServer.SecureServing.CA) == 0 { + panic("the API server has not yet been started, please do that before accessing connection details") + } + + u.cfg.CAData = u.plane.APIServer.SecureServing.CA + u.cfg.Host = u.plane.APIServer.SecureServing.URL("https", "/").String() + u.cfgIsComplete = true + return u.cfg +} + +// KubeConfig returns a KubeConfig that's roughly equivalent to this user's REST config. +// +// Will panic if used before the API server is started. 
+func (u AuthenticatedUser) KubeConfig() ([]byte, error) { + // NB(directxman12): we don't return the actual API object to avoid yet another + // piece of kubernetes API in our public API, and also because generally the thing + // you want to do with this is just write it out to a file for external debugging + // purposes, etc. + return KubeConfigFromREST(u.Config()) +} + +// Kubectl returns a KubeCtl instance for talking to the API server as this user. It uses +// a kubeconfig equivalent to that returned by .KubeConfig. +// +// Will panic if used before the API server is started. +func (u *AuthenticatedUser) Kubectl() (*KubeCtl, error) { + if u.kubectl != nil { + return u.kubectl, nil + } + if len(u.plane.APIServer.CertDir) == 0 { + panic("the API server has not yet been started, please do that before accessing connection details") + } + + // cleaning this up is handled when our tmpDir is deleted + out, err := os.CreateTemp(u.plane.APIServer.CertDir, "*.kubecfg") + if err != nil { + return nil, fmt.Errorf("unable to create file for kubeconfig: %w", err) + } + defer out.Close() + contents, err := KubeConfigFromREST(u.Config()) + if err != nil { + return nil, err + } + if _, err := out.Write(contents); err != nil { + return nil, fmt.Errorf("unable to write kubeconfig to disk at %s: %w", out.Name(), err) + } + k := &KubeCtl{ + Path: u.plane.KubectlPath, + } + k.Opts = append(k.Opts, fmt.Sprintf("--kubeconfig=%s", out.Name())) + u.kubectl = k + return k, nil +} + +// AddUser provisions a new user in the cluster. It uses the APIServer's authentication +// strategy -- see APIServer.SecureServing.Authn. +// +// Unlike AddUser, it's safe to pass a nil rest.Config here if you have no +// particular opinions about the config. 
+// The default authentication strategy is not guaranteed to be any specific strategy,
"github.com/onsi/gomega" + kauthn "k8s.io/api/authorization/v1" + + "sigs.k8s.io/controller-runtime/pkg/client" + . "sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane" +) + +var _ = Describe("Control Plane", func() { + It("should start and stop successfully with a default etcd & apiserver", func() { + plane := &ControlPlane{} + Expect(plane.Start()).To(Succeed()) + Expect(plane.Stop()).To(Succeed()) + }) + It("should use the given etcd & apiserver when starting, if present", func() { + apiServer := &APIServer{} + etcd := &Etcd{} + plane := &ControlPlane{ + APIServer: apiServer, + Etcd: etcd, + } + Expect(plane.Start()).To(Succeed()) + defer func() { Expect(plane.Stop()).To(Succeed()) }() + + Expect(plane.APIServer).To(BeIdenticalTo(apiServer)) + Expect(plane.Etcd).To(BeIdenticalTo(etcd)) + }) + + It("should be able to restart", func() { + // NB(directxman12): currently restarting invalidates all current users + // when using CertAuthn. We need to support restarting as per our previous + // contract, but it's not clear how much else we actually need to handle, or + // whether or not this is a safe operation. 
+ plane := &ControlPlane{} + Expect(plane.Start()).To(Succeed()) + Expect(plane.Stop()).To(Succeed()) + Expect(plane.Start()).To(Succeed()) + Expect(plane.Stop()).To(Succeed()) + }) + + Context("after having started", func() { + var plane *ControlPlane + BeforeEach(func() { + plane = &ControlPlane{} + Expect(plane.Start()).To(Succeed()) + }) + AfterEach(func() { + Expect(plane.Stop()).To(Succeed()) + }) + + It("should provision a working legacy user and legacy kubectl", func(ctx SpecContext) { + By("grabbing the legacy kubectl") + Expect(plane.KubeCtl()).NotTo(BeNil()) + + By("grabbing the legacy REST config and testing it") + cfg, err := plane.RESTClientConfig() + Expect(err).NotTo(HaveOccurred(), "should be able to grab the legacy REST config") + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred(), "should be able to create a client") + + sar := &kauthn.SelfSubjectAccessReview{ + Spec: kauthn.SelfSubjectAccessReviewSpec{ + ResourceAttributes: &kauthn.ResourceAttributes{ + Verb: "*", + Group: "*", + Version: "*", + Resource: "*", + }, + }, + } + Expect(cl.Create(ctx, sar)).To(Succeed(), "should be able to make a Self-SAR") + Expect(sar.Status.Allowed).To(BeTrue(), "admin user should be able to do everything") + }) + + // TODO(directxman12): more explicit tests for AddUser -- it's tested indirectly via the + // legacy user flow, but we should be explicit + + Describe("adding users", func() { + PIt("should be able to provision new users that have a corresponding REST config and & kubectl", func() { + + }) + + PIt("should produce a default base REST config if none is given to add", func() { + + }) + }) + }) +}) diff --git a/pkg/internal/testing/controlplane/testdata/fake-1.19-apiserver.sh b/pkg/internal/testing/controlplane/testdata/fake-1.19-apiserver.sh new file mode 100755 index 0000000000..8b71661185 --- /dev/null +++ b/pkg/internal/testing/controlplane/testdata/fake-1.19-apiserver.sh @@ -0,0 +1,312 @@ +#!/usr/bin/env sh + +cat 
</=true|false for a specific API group and version (e.g. apps/v1=true) + api/all=true|false controls all API versions + api/ga=true|false controls all API versions of the form v[0-9]+ + api/beta=true|false controls all API versions of the form v[0-9]+beta[0-9]+ + api/alpha=true|false controls all API versions of the form v[0-9]+alpha[0-9]+ + api/legacy is deprecated, and will be removed in a future version + +Egress selector flags: + + --egress-selector-config-file string File with apiserver egress selector configuration. + +Admission flags: + + --admission-control strings Admission is divided into two phases. In the first phase, only mutating admission plugins run. In the second phase, only validating admission plugins run. The names in the below list may represent a validating plugin, a mutating plugin, or both. The order of plugins in which they are passed to this flag does not matter. Comma-delimited list of: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodPreset, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. (DEPRECATED: Use --enable-admission-plugins or --disable-admission-plugins instead. Will be removed in a future version.) + --admission-control-config-file string File with admission control configuration. 
+ --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodPreset, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). 
Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodPreset, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + +Metrics flags: + + --show-hidden-metrics-for-version string The previous version for which you want to show hidden metrics. Only the previous minor version is meaningful, other values will not be allowed. The format is ., e.g.: '1.16'. The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, rather than being surprised when they are permanently removed in the release after that. + +Logs flags: + + --logging-format string Sets the log format. Permitted formats: "json", "text". + Non-default formats don't honor these flags: --add_dir_header, --alsologtostderr, --log_backtrace_at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip_headers, --skip_log_headers, --stderrthreshold, --vmodule, --log-flush-frequency. + Non-default choices are currently alpha and subject to change without warning. (default "text") + +Misc flags: + + --allow-privileged If true, allow privileged containers. [default=false] + --apiserver-count int The number of apiservers running in the cluster, must be a positive number. 
(In use when --endpoint-reconciler-type=master-count is enabled.) (default 1) + --enable-aggregator-routing Turns on aggregator routing requests to endpoints IP rather than cluster IP. + --endpoint-reconciler-type string Use an endpoint reconciler (master-count, lease, none) (default "lease") + --event-ttl duration Amount of time to retain events. (default 1h0m0s) + --kubelet-certificate-authority string Path to a cert file for the certificate authority. + --kubelet-client-certificate string Path to a client cert file for TLS. + --kubelet-client-key string Path to a client key file for TLS. + --kubelet-preferred-address-types strings List of the preferred NodeAddressTypes to use for kubelet connections. (default [Hostname,InternalDNS,InternalIP,ExternalDNS,ExternalIP]) + --kubelet-timeout duration Timeout for kubelet operations. (default 5s) + --kubernetes-service-node-port int If non-zero, the Kubernetes master service (which apiserver creates/maintains) will be of type NodePort, using this as the value of the port. If zero, the Kubernetes master service will be of type ClusterIP. + --max-connection-bytes-per-sec int If non-zero, throttle each user connection to this number of bytes/sec. Currently only applies to long-running requests. + --proxy-client-cert-file string Client certificate used to prove the identity of the aggregator or kube-apiserver when it must call out during a request. This includes proxying requests to a user api-server and calling out to webhook admission plugins. It is expected that this cert includes a signature from the CA in the --requestheader-client-ca-file flag. That CA is published in the 'extension-apiserver-authentication' configmap in the kube-system namespace. Components receiving calls from kube-aggregator should use that CA to perform their half of the mutual TLS verification. 
+ --proxy-client-key-file string Private key for the client certificate used to prove the identity of the aggregator or kube-apiserver when it must call out during a request. This includes proxying requests to a user api-server and calling out to webhook admission plugins. + --service-account-signing-key-file string Path to the file that contains the current private key of the service account token issuer. The issuer will sign issued ID tokens with this private key. (Requires the 'TokenRequest' feature gate.) + --service-cluster-ip-range string A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes or pods. + --service-node-port-range portRange A port range to reserve for services with NodePort visibility. Example: '30000-32767'. Inclusive at both ends of the range. (default 30000-32767) + +Global flags: + + --add-dir-header If true, adds the file directory to the header of the log messages + --alsologtostderr log to standard error as well as files + -h, --help help for kube-apiserver + --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. 
(default 1800) + --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) + --logtostderr log to standard error instead of files (default true) + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when opening log files + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level number for the log level verbosity + --version version[=true] Print version information and quit + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + +EOF diff --git a/pkg/internal/testing/controlplane/testdata/fake-1.20-apiserver.sh b/pkg/internal/testing/controlplane/testdata/fake-1.20-apiserver.sh new file mode 100755 index 0000000000..112346cce6 --- /dev/null +++ b/pkg/internal/testing/controlplane/testdata/fake-1.20-apiserver.sh @@ -0,0 +1,318 @@ +#!/usr/bin/env sh + +cat </=true|false for a specific API group and version (e.g. apps/v1=true) + api/all=true|false controls all API versions + api/ga=true|false controls all API versions of the form v[0-9]+ + api/beta=true|false controls all API versions of the form v[0-9]+beta[0-9]+ + api/alpha=true|false controls all API versions of the form v[0-9]+alpha[0-9]+ + api/legacy is deprecated, and will be removed in a future version + +Egress selector flags: + + --egress-selector-config-file string File with apiserver egress selector configuration. + +Admission flags: + + --admission-control strings Admission is divided into two phases. In the first phase, only mutating admission plugins run. In the second phase, only validating admission plugins run. The names in the below list may represent a validating plugin, a mutating plugin, or both. The order of plugins in which they are passed to this flag does not matter. 
Comma-delimited list of: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. (DEPRECATED: Use --enable-admission-plugins or --disable-admission-plugins instead. Will be removed in a future version.) + --admission-control-config-file string File with admission control configuration. + --disable-admission-plugins strings admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). 
Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + --enable-admission-plugins strings admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). 
Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + +Metrics flags: + + --show-hidden-metrics-for-version string The previous version for which you want to show hidden metrics. Only the previous minor version is meaningful, other values will not be allowed. The format is ., e.g.: '1.16'. The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, rather than being surprised when they are permanently removed in the release after that. + +Logs flags: + + --experimental-logging-sanitization [Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens). + Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production. + --logging-format string Sets the log format. Permitted formats: "json", "text". + Non-default formats don't honor these flags: --add_dir_header, --alsologtostderr, --log_backtrace_at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --one_output, --skip_headers, --skip_log_headers, --stderrthreshold, --vmodule, --log-flush-frequency. 
+ Non-default choices are currently alpha and subject to change without warning. (default "text") + +Misc flags: + + --allow-privileged If true, allow privileged containers. [default=false] + --apiserver-count int The number of apiservers running in the cluster, must be a positive number. (In use when --endpoint-reconciler-type=master-count is enabled.) (default 1) + --enable-aggregator-routing Turns on aggregator routing requests to endpoints IP rather than cluster IP. + --endpoint-reconciler-type string Use an endpoint reconciler (master-count, lease, none) (default "lease") + --event-ttl duration Amount of time to retain events. (default 1h0m0s) + --identity-lease-duration-seconds int The duration of kube-apiserver lease in seconds, must be a positive number. (In use when the APIServerIdentity feature gate is enabled.) (default 3600) + --identity-lease-renew-interval-seconds int The interval of kube-apiserver renewing its lease in seconds, must be a positive number. (In use when the APIServerIdentity feature gate is enabled.) (default 10) + --kubelet-certificate-authority string Path to a cert file for the certificate authority. + --kubelet-client-certificate string Path to a client cert file for TLS. + --kubelet-client-key string Path to a client key file for TLS. + --kubelet-preferred-address-types strings List of the preferred NodeAddressTypes to use for kubelet connections. (default [Hostname,InternalDNS,InternalIP,ExternalDNS,ExternalIP]) + --kubelet-timeout duration Timeout for kubelet operations. (default 5s) + --kubernetes-service-node-port int If non-zero, the Kubernetes master service (which apiserver creates/maintains) will be of type NodePort, using this as the value of the port. If zero, the Kubernetes master service will be of type ClusterIP. + --max-connection-bytes-per-sec int If non-zero, throttle each user connection to this number of bytes/sec. Currently only applies to long-running requests. 
+ --proxy-client-cert-file string Client certificate used to prove the identity of the aggregator or kube-apiserver when it must call out during a request. This includes proxying requests to a user api-server and calling out to webhook admission plugins. It is expected that this cert includes a signature from the CA in the --requestheader-client-ca-file flag. That CA is published in the 'extension-apiserver-authentication' configmap in the kube-system namespace. Components receiving calls from kube-aggregator should use that CA to perform their half of the mutual TLS verification. + --proxy-client-key-file string Private key for the client certificate used to prove the identity of the aggregator or kube-apiserver when it must call out during a request. This includes proxying requests to a user api-server and calling out to webhook admission plugins. + --service-account-signing-key-file string Path to the file that contains the current private key of the service account token issuer. The issuer will sign issued ID tokens with this private key. + --service-cluster-ip-range string A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes or pods. + --service-node-port-range portRange A port range to reserve for services with NodePort visibility. Example: '30000-32767'. Inclusive at both ends of the range. (default 30000-32767) + +Global flags: + + --add-dir-header If true, adds the file directory to the header of the log messages + --alsologtostderr log to standard error as well as files + -h, --help help for kube-apiserver + --log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log-dir string If non-empty, write log files in this directory + --log-file string If non-empty, use this log file + --log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. 
(default 1800) + --log-flush-frequency duration Maximum number of seconds between log flushes (default 5s) + --logtostderr log to standard error instead of files (default true) + --one-output If true, only write logs to their native severity level (vs also writing to each lower severity level + --skip-headers If true, avoid header prefixes in the log messages + --skip-log-headers If true, avoid headers when opening log files + --stderrthreshold severity logs at or above this threshold go to stderr (default 2) + -v, --v Level number for the log level verbosity + --version version[=true] Print version information and quit + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging +EOF diff --git a/pkg/internal/testing/process/arguments.go b/pkg/internal/testing/process/arguments.go new file mode 100644 index 0000000000..391eec1fac --- /dev/null +++ b/pkg/internal/testing/process/arguments.go @@ -0,0 +1,340 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "bytes" + "html/template" + "sort" + "strings" +) + +// RenderTemplates returns an []string to render the templates +// +// Deprecated: will be removed in favor of Arguments. 
+func RenderTemplates(argTemplates []string, data interface{}) (args []string, err error) { + var t *template.Template + + for _, arg := range argTemplates { + t, err = template.New(arg).Parse(arg) + if err != nil { + args = nil + return + } + + buf := &bytes.Buffer{} + err = t.Execute(buf, data) + if err != nil { + args = nil + return + } + args = append(args, buf.String()) + } + + return +} + +// SliceToArguments converts a slice of arguments to structured arguments, +// appending each argument that starts with `--` and contains an `=` to the +// argument set (ignoring defaults), returning the rest. +// +// Deprecated: will be removed when RenderTemplates is removed. +func SliceToArguments(sliceArgs []string, args *Arguments) []string { + var rest []string + for i, arg := range sliceArgs { + if arg == "--" { + rest = append(rest, sliceArgs[i:]...) + return rest + } + // skip non-flag arguments, skip arguments w/o equals because we + // can't tell if the next argument should take a value + if !strings.HasPrefix(arg, "--") || !strings.Contains(arg, "=") { + rest = append(rest, arg) + continue + } + + parts := strings.SplitN(arg[2:], "=", 2) + name := parts[0] + val := parts[1] + + args.AppendNoDefaults(name, val) + } + + return rest +} + +// TemplateDefaults specifies defaults to be used for joining structured arguments with templates. +// +// Deprecated: will be removed when RenderTemplates is removed. +type TemplateDefaults struct { + // Data will be used to render the template. + Data interface{} + // Defaults will be used to default structured arguments if no template is passed. + Defaults map[string][]string + // MinimalDefaults will be used to default structured arguments if a template is passed. + // Use this for flags which *must* be present. + MinimalDefaults map[string][]string // for api server service-cluster-ip-range +} + +// TemplateAndArguments joins structured arguments and non-structured arguments, preserving existing +// behavior. 
Namely: +// +// 1. if templ has len > 0, it will be rendered against data +// 2. the rendered template values that look like `--foo=bar` will be split +// and appended to args, the rest will be kept around +// 3. the given args will be rendered as string form. If a template is given, +// no defaults will be used, otherwise defaults will be used +// 4. a result of [args..., rest...] will be returned +// +// It returns the resulting rendered arguments, plus the arguments that were +// not transferred to `args` during rendering. +// +// Deprecated: will be removed when RenderTemplates is removed. +func TemplateAndArguments(templ []string, args *Arguments, data TemplateDefaults) (allArgs []string, nonFlagishArgs []string, err error) { + if len(templ) == 0 { // 3 & 4 (no template case) + return args.AsStrings(data.Defaults), nil, nil + } + + // 1: render the template + rendered, err := RenderTemplates(templ, data.Data) + if err != nil { + return nil, nil, err + } + + // 2: filter out structured args and add them to args + rest := SliceToArguments(rendered, args) + + // 3 (template case): render structured args, no defaults (matching the + // legacy case where if Args was specified, no defaults were used) + res := args.AsStrings(data.MinimalDefaults) + + // 4: return the rendered structured args + all non-structured args + return append(res, rest...), rest, nil +} + +// EmptyArguments constructs an empty set of flags with no defaults. +func EmptyArguments() *Arguments { + return &Arguments{ + values: make(map[string]Arg), + } +} + +// Arguments are structured, overridable arguments. +// Each Arguments object contains some set of default arguments, which may +// be appended to, or overridden. +// +// When ready, you can serialize them to pass to exec.Command and friends using +// AsStrings. +// +// All flag-setting methods return the *same* instance of Arguments so that you +// can chain calls. 
+type Arguments struct { + // values contains the user-set values for the arguments. + // `values[key] = dontPass` means "don't pass this flag" + // `values[key] = passAsName` means "pass this flag without args like --key` + // `values[key] = []string{a, b, c}` means "--key=a --key=b --key=c` + // any values not explicitly set here will be copied from defaults on final rendering. + values map[string]Arg +} + +// Arg is an argument that has one or more values, +// and optionally falls back to default values. +type Arg interface { + // Append adds new values to this argument, returning + // a new instance contain the new value. The intermediate + // argument should generally be assumed to be consumed. + Append(vals ...string) Arg + // Get returns the full set of values, optionally including + // the passed in defaults. If it returns nil, this will be + // skipped. If it returns a non-nil empty slice, it'll be + // assumed that the argument should be passed as name-only. + Get(defaults []string) []string +} + +type userArg []string + +func (a userArg) Append(vals ...string) Arg { + return userArg(append(a, vals...)) //nolint:unconvert +} +func (a userArg) Get(_ []string) []string { + return []string(a) +} + +type defaultedArg []string + +func (a defaultedArg) Append(vals ...string) Arg { + return defaultedArg(append(a, vals...)) //nolint:unconvert +} +func (a defaultedArg) Get(defaults []string) []string { + res := append([]string(nil), defaults...) + return append(res, a...) +} + +type dontPassArg struct{} + +func (a dontPassArg) Append(vals ...string) Arg { + return userArg(vals) +} +func (dontPassArg) Get(_ []string) []string { + return nil +} + +type passAsNameArg struct{} + +func (a passAsNameArg) Append(_ ...string) Arg { + return passAsNameArg{} +} +func (passAsNameArg) Get(_ []string) []string { + return []string{} +} + +var ( + // DontPass indicates that the given argument will not actually be + // rendered. 
+ DontPass Arg = dontPassArg{} + // PassAsName indicates that the given flag will be passed as `--key` + // without any value. + PassAsName Arg = passAsNameArg{} +) + +// AsStrings serializes this set of arguments to a slice of strings appropriate +// for passing to exec.Command and friends, making use of the given defaults +// as indicated for each particular argument. +// +// - Any flag in defaults that's not in Arguments will be present in the output +// - Any flag that's present in Arguments will be passed the corresponding +// defaults to do with as it will (ignore, append-to, suppress, etc). +func (a *Arguments) AsStrings(defaults map[string][]string) []string { + // sort for deterministic ordering + keysInOrder := make([]string, 0, len(defaults)+len(a.values)) + for key := range defaults { + if _, userSet := a.values[key]; userSet { + continue + } + keysInOrder = append(keysInOrder, key) + } + for key := range a.values { + keysInOrder = append(keysInOrder, key) + } + sort.Strings(keysInOrder) + + var res []string + for _, key := range keysInOrder { + vals := a.Get(key).Get(defaults[key]) + switch { + case vals == nil: // don't pass + continue + case len(vals) == 0: // pass as name + res = append(res, "--"+key) + default: + for _, val := range vals { + res = append(res, "--"+key+"="+val) + } + } + } + + return res +} + +// Get returns the value of the given flag. If nil, +// it will not be passed in AsString, otherwise: +// +// len == 0 --> `--key`, len > 0 --> `--key=val1 --key=val2 ...`. +func (a *Arguments) Get(key string) Arg { + if vals, ok := a.values[key]; ok { + return vals + } + return defaultedArg(nil) +} + +// Enable configures the given key to be passed as a "name-only" flag, +// like, `--key`. +func (a *Arguments) Enable(key string) *Arguments { + a.values[key] = PassAsName + return a +} + +// Disable prevents this flag from be passed. 
+func (a *Arguments) Disable(key string) *Arguments { + a.values[key] = DontPass + return a +} + +// Append adds additional values to this flag. If this flag has +// yet to be set, initial values will include defaults. If you want +// to intentionally ignore defaults/start from scratch, call AppendNoDefaults. +// +// Multiple values will look like `--key=value1 --key=value2 ...`. +func (a *Arguments) Append(key string, values ...string) *Arguments { + vals, present := a.values[key] + if !present { + vals = defaultedArg{} + } + a.values[key] = vals.Append(values...) + return a +} + +// AppendNoDefaults adds additional values to this flag. However, +// unlike Append, it will *not* copy values from defaults. +func (a *Arguments) AppendNoDefaults(key string, values ...string) *Arguments { + vals, present := a.values[key] + if !present { + vals = userArg{} + } + a.values[key] = vals.Append(values...) + return a +} + +// Set resets the given flag to the specified values, ignoring any existing +// values or defaults. +func (a *Arguments) Set(key string, values ...string) *Arguments { + a.values[key] = userArg(values) + return a +} + +// SetRaw sets the given flag to the given Arg value directly. Use this if +// you need to do some complicated deferred logic or something. +// +// Otherwise behaves like Set. +func (a *Arguments) SetRaw(key string, val Arg) *Arguments { + a.values[key] = val + return a +} + +// FuncArg is a basic implementation of Arg that can be used for custom argument logic, +// like pulling values out of APIServer, or dynamically calculating values just before +// launch. +// +// The given function will be mapped directly to Arg#Get, and will generally be +// used in conjunction with SetRaw. 
For example, to set `--some-flag` to the +// API server's CertDir, you could do: +// +// server.Configure().SetRaw("--some-flag", FuncArg(func(defaults []string) []string { +// return []string{server.CertDir} +// })) +// +// FuncArg ignores Appends; if you need to support appending values too, consider implementing +// Arg directly. +type FuncArg func([]string) []string + +// Append is a no-op for FuncArg, and just returns itself. +func (a FuncArg) Append(vals ...string) Arg { return a } + +// Get delegates functionality to the FuncArg function itself. +func (a FuncArg) Get(defaults []string) []string { + return a(defaults) +} diff --git a/pkg/internal/testing/process/arguments_test.go b/pkg/internal/testing/process/arguments_test.go new file mode 100644 index 0000000000..b513cbdf86 --- /dev/null +++ b/pkg/internal/testing/process/arguments_test.go @@ -0,0 +1,346 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process_test + +import ( + "net/url" + "strings" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + . 
"sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +var _ = Describe("Arguments Templates", func() { + It("templates URLs", func() { + templates := []string{ + "plain URL: {{ .SomeURL }}", + "method on URL: {{ .SomeURL.Hostname }}", + "empty URL: {{ .EmptyURL }}", + "handled empty URL: {{- if .EmptyURL }}{{ .EmptyURL }}{{ end }}", + } + data := struct { + SomeURL *url.URL + EmptyURL *url.URL + }{ + &url.URL{Scheme: "https", Host: "the.host.name:3456"}, + nil, + } + + out, err := RenderTemplates(templates, data) + Expect(err).NotTo(HaveOccurred()) + Expect(out).To(BeEquivalentTo([]string{ + "plain URL: https://the.host.name:3456", + "method on URL: the.host.name", + "empty URL: <nil>", + "handled empty URL:", + })) + }) + + It("templates strings", func() { + templates := []string{ + "a string: {{ .SomeString }}", + "empty string: {{- .EmptyString }}", + } + data := struct { + SomeString string + EmptyString string + }{ + "this is some random string", + "", + } + + out, err := RenderTemplates(templates, data) + Expect(err).NotTo(HaveOccurred()) + Expect(out).To(BeEquivalentTo([]string{ + "a string: this is some random string", + "empty string:", + })) + }) + + It("has no access to unexported fields", func() { + templates := []string{ + "this is just a string", + "this blows up {{ .test }}", + } + data := struct{ test string }{"ooops private"} + + out, err := RenderTemplates(templates, data) + Expect(out).To(BeEmpty()) + Expect(err).To(MatchError( + ContainSubstring("is an unexported field of struct"), + )) + }) + + It("errors when field cannot be found", func() { + templates := []string{"this does {{ .NotExist }}"} + data := struct{ Unused string }{"unused"} + + out, err := RenderTemplates(templates, data) + Expect(out).To(BeEmpty()) + Expect(err).To(MatchError( + ContainSubstring("can't evaluate field"), + )) + }) + + Context("when joining with structured Arguments", func() { + var ( + args *Arguments + templ = []string{ + "--cheese=parmesean", + 
"-om", + "nom nom nom", + "--sharpness={{ .sharpness }}", + } + data = TemplateDefaults{ + Data: map[string]string{"sharpness": "extra"}, + Defaults: map[string][]string{ + "cracker": {"ritz"}, + "pickle": {"kosher-dill"}, + }, + MinimalDefaults: map[string][]string{ + "pickle": {"kosher-dill"}, + }, + } + ) + BeforeEach(func() { + args = EmptyArguments() + }) + + Context("when a template is given", func() { + It("should use minimal defaults", func() { + all, _, err := TemplateAndArguments(templ, args, data) + Expect(err).NotTo(HaveOccurred()) + Expect(all).To(SatisfyAll( + Not(ContainElement("--cracker=ritz")), + ContainElement("--pickle=kosher-dill"), + )) + }) + + It("should render the template against the data", func() { + all, _, err := TemplateAndArguments(templ, args, data) + Expect(err).NotTo(HaveOccurred()) + Expect(all).To(ContainElements( + "--sharpness=extra", + )) + }) + + It("should append the rendered template to structured arguments", func() { + args.Append("cheese", "cheddar") + + all, _, err := TemplateAndArguments(templ, args, data) + Expect(err).NotTo(HaveOccurred()) + Expect(all).To(Equal([]string{ + "--cheese=cheddar", + "--cheese=parmesean", + "--pickle=kosher-dill", + "--sharpness=extra", + "-om", + "nom nom nom", + })) + }) + + It("should indicate which arguments were not able to be converted to structured flags", func() { + _, rest, err := TemplateAndArguments(templ, args, data) + Expect(err).NotTo(HaveOccurred()) + Expect(rest).To(Equal([]string{"-om", "nom nom nom"})) + + }) + }) + + Context("when no template is given", func() { + It("should render the structured arguments with the given defaults", func() { + args. + Append("cheese", "cheddar", "parmesean"). 
+ Append("cracker", "triscuit") + + Expect(TemplateAndArguments(nil, args, data)).To(Equal([]string{ + "--cheese=cheddar", + "--cheese=parmesean", + "--cracker=ritz", + "--cracker=triscuit", + "--pickle=kosher-dill", + })) + }) + }) + }) + + Context("when converting to structured Arguments", func() { + var args *Arguments + BeforeEach(func() { + args = EmptyArguments() + }) + + It("should skip arguments that don't start with `--`", func() { + rest := SliceToArguments([]string{"-first", "second", "--foo=bar"}, args) + Expect(rest).To(Equal([]string{"-first", "second"})) + Expect(args.AsStrings(nil)).To(Equal([]string{"--foo=bar"})) + }) + + It("should skip arguments that don't contain an `=` because they're ambiguous", func() { + rest := SliceToArguments([]string{"--first", "--second", "--foo=bar"}, args) + Expect(rest).To(Equal([]string{"--first", "--second"})) + Expect(args.AsStrings(nil)).To(Equal([]string{"--foo=bar"})) + }) + + It("should stop at the flag terminator (`--`)", func() { + rest := SliceToArguments([]string{"--first", "--second", "--", "--foo=bar"}, args) + Expect(rest).To(Equal([]string{"--first", "--second", "--", "--foo=bar"})) + Expect(args.AsStrings(nil)).To(BeEmpty()) + }) + + It("should split --foo=bar into Append(foo, bar)", func() { + rest := SliceToArguments([]string{"--foo=bar1", "--foo=bar2"}, args) + Expect(rest).To(BeEmpty()) + Expect(args.Get("foo").Get(nil)).To(Equal([]string{"bar1", "bar2"})) + }) + + It("should split --foo=bar=baz into Append(foo, bar=baz)", func() { + rest := SliceToArguments([]string{"--vmodule=file.go=3", "--vmodule=other.go=4"}, args) + Expect(rest).To(BeEmpty()) + Expect(args.Get("vmodule").Get(nil)).To(Equal([]string{"file.go=3", "other.go=4"})) + }) + + It("should append to existing arguments", func() { + args.Append("foo", "barA") + rest := SliceToArguments([]string{"--foo=bar1", "--foo=bar2"}, args) + Expect(rest).To(BeEmpty()) + Expect(args.Get("foo").Get([]string{"barI"})).To(Equal([]string{"barI", 
"barA", "bar1", "bar2"})) + }) + }) +}) + +var _ = Describe("Arguments", func() { + Context("when appending", func() { + It("should copy from defaults when appending for the first time", func() { + args := EmptyArguments(). + Append("some-key", "val3") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"val1", "val2", "val3"})) + }) + + It("should not copy from defaults if the flag has been disabled previously", func() { + args := EmptyArguments(). + Disable("some-key"). + Append("some-key", "val3") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"val3"})) + }) + + It("should only copy defaults the first time", func() { + args := EmptyArguments(). + Append("some-key", "val3", "val4"). + Append("some-key", "val5") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"val1", "val2", "val3", "val4", "val5"})) + }) + + It("should not copy from defaults if the flag has been previously overridden", func() { + args := EmptyArguments(). + Set("some-key", "vala"). + Append("some-key", "valb", "valc") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"vala", "valb", "valc"})) + }) + + Context("when explicitly overriding defaults", func() { + It("should not copy from defaults, but should append to previous calls", func() { + args := EmptyArguments(). + AppendNoDefaults("some-key", "vala"). + AppendNoDefaults("some-key", "valb", "valc") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"vala", "valb", "valc"})) + }) + + It("should not copy from defaults, but should respect previous appends' copies", func() { + args := EmptyArguments(). + Append("some-key", "vala"). 
+ AppendNoDefaults("some-key", "valb", "valc") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"val1", "val2", "vala", "valb", "valc"})) + }) + + It("should not copy from defaults if the flag has been previously appended to ignoring defaults", func() { + args := EmptyArguments(). + AppendNoDefaults("some-key", "vala"). + Append("some-key", "valb", "valc") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"vala", "valb", "valc"})) + }) + }) + }) + + It("should ignore defaults when overriding", func() { + args := EmptyArguments(). + Set("some-key", "vala") + Expect(args.Get("some-key").Get([]string{"val1", "val2"})).To(Equal([]string{"vala"})) + }) + + It("should allow directly setting the argument value for custom argument types", func() { + args := EmptyArguments(). + SetRaw("custom-key", commaArg{"val3"}). + Append("custom-key", "val4") + Expect(args.Get("custom-key").Get([]string{"val1", "val2"})).To(Equal([]string{"val1,val2,val3,val4"})) + }) + + Context("when rendering flags", func() { + It("should not render defaults for disabled flags", func() { + defs := map[string][]string{ + "some-key": {"val1", "val2"}, + "other-key": {"val"}, + } + args := EmptyArguments(). + Disable("some-key") + Expect(args.AsStrings(defs)).To(ConsistOf("--other-key=val")) + }) + + It("should render name-only flags as --key", func() { + args := EmptyArguments(). + Enable("some-key") + Expect(args.AsStrings(nil)).To(ConsistOf("--some-key")) + }) + + It("should render multiple values as --key=val1, --key=val2", func() { + args := EmptyArguments(). + Append("some-key", "val1", "val2"). + Append("other-key", "vala", "valb") + Expect(args.AsStrings(nil)).To(ConsistOf("--other-key=valb", "--other-key=vala", "--some-key=val1", "--some-key=val2")) + }) + + It("should read from defaults if the user hasn't set a value for a flag", func() { + defs := map[string][]string{ + "some-key": {"val1", "val2"}, + } + args := EmptyArguments(). 
+ Append("other-key", "vala", "valb") + Expect(args.AsStrings(defs)).To(ConsistOf("--other-key=valb", "--other-key=vala", "--some-key=val1", "--some-key=val2")) + }) + + It("should not render defaults if the user has set a value for a flag", func() { + defs := map[string][]string{ + "some-key": {"val1", "val2"}, + } + args := EmptyArguments(). + Set("some-key", "vala") + Expect(args.AsStrings(defs)).To(ConsistOf("--some-key=vala")) + }) + }) +}) + +type commaArg []string + +func (a commaArg) Get(defs []string) []string { + // not quite, but close enough + return []string{strings.Join(defs, ",") + "," + strings.Join(a, ",")} +} +func (a commaArg) Append(vals ...string) Arg { + return commaArg(append(a, vals...)) //nolint:unconvert +} diff --git a/pkg/internal/testing/process/bin_path_finder.go b/pkg/internal/testing/process/bin_path_finder.go new file mode 100644 index 0000000000..e1428aa6e5 --- /dev/null +++ b/pkg/internal/testing/process/bin_path_finder.go @@ -0,0 +1,70 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "os" + "path/filepath" + "regexp" + "strings" +) + +const ( + // EnvAssetsPath is the environment variable that stores the global test + // binary location override. + EnvAssetsPath = "KUBEBUILDER_ASSETS" + // EnvAssetOverridePrefix is the environment variable prefix for per-binary + // location overrides. 
+	EnvAssetOverridePrefix = "TEST_ASSET_"
+	// AssetsDefaultPath is the default location to look for test binaries in,
+	// if no override was provided.
+	AssetsDefaultPath = "/usr/local/kubebuilder/bin"
+)
+
+// BinPathFinder finds the path to the given named binary, using the following locations
+// in order of precedence (highest first). Notice that the various env vars only need
+// to be set -- the asset is not checked for existence on the filesystem.
+//
+// 1. TEST_ASSET_{tr/a-z-/A-Z_/} (if set; asset overrides -- EnvAssetOverridePrefix)
+// 2. KUBEBUILDER_ASSETS (if set; global asset path -- EnvAssetsPath)
+// 3. assetDirectory (if set; per-config asset directory)
+// 4. /usr/local/kubebuilder/bin (AssetsDefaultPath).
+func BinPathFinder(symbolicName, assetDirectory string) (binPath string) {
+	punctuationPattern := regexp.MustCompile("[^A-Z0-9]+")
+	sanitizedName := punctuationPattern.ReplaceAllString(strings.ToUpper(symbolicName), "_")
+	leadingNumberPattern := regexp.MustCompile("^[0-9]+")
+	sanitizedName = leadingNumberPattern.ReplaceAllString(sanitizedName, "")
+	envVar := EnvAssetOverridePrefix + sanitizedName
+
+	// TEST_ASSET_XYZ
+	if val, ok := os.LookupEnv(envVar); ok {
+		return val
+	}
+
+	// KUBEBUILDER_ASSETS
+	if val, ok := os.LookupEnv(EnvAssetsPath); ok {
+		return filepath.Join(val, symbolicName)
+	}
+
+	// assetDirectory
+	if assetDirectory != "" {
+		return filepath.Join(assetDirectory, symbolicName)
+	}
+
+	// default path
+	return filepath.Join(AssetsDefaultPath, symbolicName)
+}
diff --git a/pkg/internal/testing/process/bin_path_finder_test.go b/pkg/internal/testing/process/bin_path_finder_test.go
new file mode 100644
index 0000000000..425459e3aa
--- /dev/null
+++ b/pkg/internal/testing/process/bin_path_finder_test.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "os" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("BinPathFinder", func() { + var prevAssetPath string + BeforeEach(func() { + prevAssetPath = os.Getenv(EnvAssetsPath) + Expect(os.Unsetenv(EnvAssetsPath)).To(Succeed()) + Expect(os.Unsetenv(EnvAssetOverridePrefix + "_SOME_FAKE")).To(Succeed()) + Expect(os.Unsetenv(EnvAssetOverridePrefix + "OTHERFAKE")).To(Succeed()) + }) + AfterEach(func() { + if prevAssetPath != "" { + Expect(os.Setenv(EnvAssetsPath, prevAssetPath)).To(Succeed()) + } + }) + Context("when individual overrides are present", func() { + BeforeEach(func() { + Expect(os.Setenv(EnvAssetOverridePrefix+"OTHERFAKE", "/other/path")).To(Succeed()) + Expect(os.Setenv(EnvAssetOverridePrefix+"_SOME_FAKE", "/some/path")).To(Succeed()) + // set the global path to make sure we don't prefer it + Expect(os.Setenv(EnvAssetsPath, "/global/path")).To(Succeed()) + }) + + It("should prefer individual overrides, using them unmodified", func() { + Expect(BinPathFinder("otherfake", "/hardcoded/path")).To(Equal("/other/path")) + }) + + It("should convert lowercase to uppercase, remove leading numbers, and replace punctuation with underscores when resolving the env var name", func() { + Expect(BinPathFinder("123.some-fake", "/hardcoded/path")).To(Equal("/some/path")) + }) + }) + + Context("when individual overrides are missing but the global override is present", func() { + BeforeEach(func() { + Expect(os.Setenv(EnvAssetsPath, "/global/path")).To(Succeed()) + }) + It("should 
prefer the global override, appending the name to that path", func() { + Expect(BinPathFinder("some-fake", "/hardcoded/path")).To(Equal("/global/path/some-fake")) + }) + }) + + Context("when an asset directory is given and no overrides are present", func() { + It("should use the asset directory, appending the name to that path", func() { + Expect(BinPathFinder("some-fake", "/hardcoded/path")).To(Equal("/hardcoded/path/some-fake")) + }) + }) + + Context("when no path configuration is given", func() { + It("should just use the default path", func() { + Expect(BinPathFinder("some-fake", "")).To(Equal("/usr/local/kubebuilder/bin/some-fake")) + }) + }) +}) diff --git a/pkg/internal/testing/process/procattr_other.go b/pkg/internal/testing/process/procattr_other.go new file mode 100644 index 0000000000..df13b341a4 --- /dev/null +++ b/pkg/internal/testing/process/procattr_other.go @@ -0,0 +1,28 @@ +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import "syscall" + +// GetSysProcAttr returns the SysProcAttr to use for the process, +// for non-unix systems this returns nil. 
+func GetSysProcAttr() *syscall.SysProcAttr {
+	return nil
+}
diff --git a/examples/conversion/pkg/apis/apis.go b/pkg/internal/testing/process/procattr_unix.go
similarity index 51%
rename from examples/conversion/pkg/apis/apis.go
rename to pkg/internal/testing/process/procattr_unix.go
index 15ccbf9c8a..83ad509af0 100644
--- a/examples/conversion/pkg/apis/apis.go
+++ b/pkg/internal/testing/process/procattr_unix.go
@@ -1,4 +1,8 @@
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
 /*
+Copyright 2023 The Kubernetes Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -13,20 +17,17 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Generate deepcopy for apis
-//go:generate go run ../../vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go -O zz_generated.deepcopy -i ./... -h ../../hack/boilerplate.go.txt
-
-// Package apis contains Kubernetes API groups.
-package apis
+package process
 
 import (
-	"k8s.io/apimachinery/pkg/runtime"
+	"golang.org/x/sys/unix"
)
 
-// AddToSchemes may be used to add all resources defined in the project to a Scheme
-var AddToSchemes runtime.SchemeBuilder
-
-// AddToScheme adds all Resources to the Scheme
-func AddToScheme(s *runtime.Scheme) error {
-	return AddToSchemes.AddToScheme(s)
+// GetSysProcAttr returns the SysProcAttr to use for the process,
+// for unix systems this returns a SysProcAttr with Setpgid set to true,
+// which places the process in a new process group rather than the parent's.
+func GetSysProcAttr() *unix.SysProcAttr { + return &unix.SysProcAttr{ + Setpgid: true, + } } diff --git a/pkg/internal/testing/process/process.go b/pkg/internal/testing/process/process.go new file mode 100644 index 0000000000..0d541921e2 --- /dev/null +++ b/pkg/internal/testing/process/process.go @@ -0,0 +1,276 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "regexp" + "sync" + "syscall" + "time" +) + +// ListenAddr represents some listening address and port. +type ListenAddr struct { + Address string + Port string +} + +// URL returns a URL for this address with the given scheme and subpath. +func (l *ListenAddr) URL(scheme string, path string) *url.URL { + return &url.URL{ + Scheme: scheme, + Host: l.HostPort(), + Path: path, + } +} + +// HostPort returns the joined host-port pair for this address. +func (l *ListenAddr) HostPort() string { + return net.JoinHostPort(l.Address, l.Port) +} + +// HealthCheck describes the information needed to health-check a process via +// some health-check URL. +type HealthCheck struct { + url.URL + + // HealthCheckPollInterval is the interval which will be used for polling the + // endpoint described by Host, Port, and Path. + // + // If left empty it will default to 100 Milliseconds. + PollInterval time.Duration +} + +// State define the state of the process. 
+type State struct { + Cmd *exec.Cmd + + // HealthCheck describes how to check if this process is up. If we get an http.StatusOK, + // we assume the process is ready to operate. + // + // For example, the /healthz endpoint of the k8s API server, or the /health endpoint of etcd. + HealthCheck HealthCheck + + Args []string + + StopTimeout time.Duration + StartTimeout time.Duration + + Dir string + DirNeedsCleaning bool + Path string + + // ready holds whether the process is currently in ready state (hit the ready condition) or not. + // It will be set to true on a successful `Start()` and set to false on a successful `Stop()` + ready bool + + // waitDone is closed when our call to wait finishes up, and indicates that + // our process has terminated. + waitDone chan struct{} + errMu sync.Mutex + exitErr error + exited bool +} + +// Init sets up this process, configuring binary paths if missing, initializing +// temporary directories, etc. +// +// This defaults all defaultable fields. +func (ps *State) Init(name string) error { + if ps.Path == "" { + if name == "" { + return fmt.Errorf("must have at least one of name or path") + } + ps.Path = BinPathFinder(name, "") + } + + if ps.Dir == "" { + newDir, err := os.MkdirTemp("", "k8s_test_framework_") + if err != nil { + return err + } + ps.Dir = newDir + ps.DirNeedsCleaning = true + } + + if ps.StartTimeout == 0 { + ps.StartTimeout = 20 * time.Second + } + + if ps.StopTimeout == 0 { + ps.StopTimeout = 20 * time.Second + } + return nil +} + +type stopChannel chan struct{} + +// CheckFlag checks the help output of this command for the presence of the given flag, specified +// without the leading `--` (e.g. `CheckFlag("insecure-port")` checks for `--insecure-port`), +// returning true if the flag is present. 
+func (ps *State) CheckFlag(flag string) (bool, error) { + cmd := exec.Command(ps.Path, "--help") + outContents, err := cmd.CombinedOutput() + if err != nil { + return false, fmt.Errorf("unable to run command %q to check for flag %q: %w", ps.Path, flag, err) + } + pat := `(?m)^\s*--` + flag + `\b` // (m --> multi-line --> ^ matches start of line) + matched, err := regexp.Match(pat, outContents) + if err != nil { + return false, fmt.Errorf("unable to check command %q for flag %q in help output: %w", ps.Path, flag, err) + } + return matched, nil +} + +// Start starts the apiserver, waits for it to come up, and returns an error, +// if occurred. +func (ps *State) Start(stdout, stderr io.Writer) (err error) { + if ps.ready { + return nil + } + + ps.Cmd = exec.Command(ps.Path, ps.Args...) + ps.Cmd.Stdout = stdout + ps.Cmd.Stderr = stderr + ps.Cmd.SysProcAttr = GetSysProcAttr() + + ready := make(chan bool) + timedOut := time.After(ps.StartTimeout) + pollerStopCh := make(stopChannel) + go pollURLUntilOK(ps.HealthCheck.URL, ps.HealthCheck.PollInterval, ready, pollerStopCh) + + ps.waitDone = make(chan struct{}) + + if err := ps.Cmd.Start(); err != nil { + ps.errMu.Lock() + defer ps.errMu.Unlock() + ps.exited = true + return err + } + go func() { + defer close(ps.waitDone) + err := ps.Cmd.Wait() + + ps.errMu.Lock() + defer ps.errMu.Unlock() + ps.exitErr = err + ps.exited = true + }() + + select { + case <-ready: + ps.ready = true + return nil + case <-ps.waitDone: + close(pollerStopCh) + return fmt.Errorf("timeout waiting for process %s to start successfully "+ + "(it may have failed to start, or stopped unexpectedly before becoming ready)", + path.Base(ps.Path)) + case <-timedOut: + close(pollerStopCh) + if ps.Cmd != nil { + // intentionally ignore this -- we might've crashed, failed to start, etc + ps.Cmd.Process.Signal(syscall.SIGTERM) //nolint:errcheck + } + return fmt.Errorf("timeout waiting for process %s to start", path.Base(ps.Path)) + } +} + +// Exited returns true 
if the process exited, and may also +// return an error (as per Cmd.Wait) if the process did not +// exit with error code 0. +func (ps *State) Exited() (bool, error) { + ps.errMu.Lock() + defer ps.errMu.Unlock() + return ps.exited, ps.exitErr +} + +func pollURLUntilOK(url url.URL, interval time.Duration, ready chan bool, stopCh stopChannel) { + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + // there's probably certs *somewhere*, + // but it's fine to just skip validating + // them for health checks during testing + InsecureSkipVerify: true, + }, + }, + } + if interval <= 0 { + interval = 100 * time.Millisecond + } + for { + res, err := client.Get(url.String()) + if err == nil { + res.Body.Close() + if res.StatusCode == http.StatusOK { + ready <- true + return + } + } + + select { + case <-stopCh: + return + default: + time.Sleep(interval) + } + } +} + +// Stop stops this process gracefully, waits for its termination, and cleans up +// the CertDir if necessary. +func (ps *State) Stop() error { + // Always clear the directory if we need to. 
+ defer func() { + if ps.DirNeedsCleaning { + _ = os.RemoveAll(ps.Dir) + } + }() + if ps.Cmd == nil { + return nil + } + if done, _ := ps.Exited(); done { + return nil + } + if err := ps.Cmd.Process.Signal(syscall.SIGTERM); err != nil { + return fmt.Errorf("unable to signal for process %s to stop: %w", ps.Path, err) + } + + timedOut := time.After(ps.StopTimeout) + + select { + case <-ps.waitDone: + break + case <-timedOut: + if err := ps.Cmd.Process.Signal(syscall.SIGKILL); err != nil { + return fmt.Errorf("unable to kill process %s: %w", ps.Path, err) + } + return fmt.Errorf("timeout waiting for process %s to stop", path.Base(ps.Path)) + } + ps.ready = false + return nil +} diff --git a/pkg/internal/testing/process/process_suite_test.go b/pkg/internal/testing/process/process_suite_test.go new file mode 100644 index 0000000000..5a64e9d2f0 --- /dev/null +++ b/pkg/internal/testing/process/process_suite_test.go @@ -0,0 +1,30 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestInternal(t *testing.T) { + t.Parallel() + RegisterFailHandler(Fail) + RunSpecs(t, "Envtest Process Launcher Suite") +} diff --git a/pkg/internal/testing/process/process_test.go b/pkg/internal/testing/process/process_test.go new file mode 100644 index 0000000000..1d01e95b09 --- /dev/null +++ b/pkg/internal/testing/process/process_test.go @@ -0,0 +1,363 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process_test + +import ( + "bytes" + "net" + "net/http" + "net/url" + "os" + "strconv" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/ghttp" + "sigs.k8s.io/controller-runtime/pkg/internal/testing/addr" + . 
"sigs.k8s.io/controller-runtime/pkg/internal/testing/process" +) + +const ( + healthURLPath = "/healthz" +) + +var _ = Describe("Start method", func() { + var ( + processState *State + server *ghttp.Server + ) + BeforeEach(func() { + server = ghttp.NewServer() + + processState = &State{ + Path: "bash", + Args: simpleBashScript, + HealthCheck: HealthCheck{ + URL: getServerURL(server), + }, + } + processState.Path = "bash" + processState.Args = simpleBashScript + + }) + AfterEach(func() { + server.Close() + }) + + Context("when process takes too long to start", func() { + BeforeEach(func() { + server.RouteToHandler("GET", healthURLPath, func(resp http.ResponseWriter, _ *http.Request) { + time.Sleep(250 * time.Millisecond) + resp.WriteHeader(http.StatusOK) + }) + }) + It("returns a timeout error", func() { + processState.StartTimeout = 200 * time.Millisecond + + err := processState.Start(nil, nil) + Expect(err).To(MatchError(ContainSubstring("timeout"))) + + Eventually(func() bool { done, _ := processState.Exited(); return done }).Should(BeTrue()) + }) + }) + + Context("when the healthcheck returns ok", func() { + BeforeEach(func() { + + server.RouteToHandler("GET", healthURLPath, ghttp.RespondWith(http.StatusOK, "")) + }) + + It("can start a process", func() { + processState.StartTimeout = 10 * time.Second + + err := processState.Start(nil, nil) + Expect(err).NotTo(HaveOccurred()) + + Consistently(processState.Exited).Should(BeFalse()) + }) + + It("hits the endpoint, and successfully starts", func() { + processState.StartTimeout = 100 * time.Millisecond + + err := processState.Start(nil, nil) + Expect(err).NotTo(HaveOccurred()) + Expect(server.ReceivedRequests()).To(HaveLen(1)) + Consistently(processState.Exited).Should(BeFalse()) + }) + + Context("when the command cannot be started", func() { + var err error + + BeforeEach(func() { + processState = &State{} + processState.Path = "/nonexistent" + + err = processState.Start(nil, nil) + }) + + It("propagates the 
error", func() { + Expect(os.IsNotExist(err)).To(BeTrue()) + }) + + Context("but Stop() is called on it", func() { + It("does not panic", func() { + stoppingFailedProcess := func() { + Expect(processState.Stop()).To(Succeed()) + } + + Expect(stoppingFailedProcess).NotTo(Panic()) + }) + }) + }) + + Context("when IO is configured", func() { + It("can inspect stdout & stderr", func() { + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + + processState.Args = []string{ + "-c", + ` + echo 'this is stderr' >&2 + echo 'that is stdout' + echo 'i started' >&2 + `, + } + processState.StartTimeout = 5 * time.Second + + Expect(processState.Start(stdout, stderr)).To(Succeed()) + Eventually(processState.Exited).Should(BeTrue()) + + Expect(stdout.String()).To(Equal("that is stdout\n")) + Expect(stderr.String()).To(Equal("this is stderr\ni started\n")) + }) + }) + }) + + Context("when the healthcheck always returns failure", func() { + BeforeEach(func() { + server.RouteToHandler("GET", healthURLPath, ghttp.RespondWith(http.StatusInternalServerError, "")) + }) + It("returns a timeout error and stops health API checker", func() { + processState.HealthCheck.URL = getServerURL(server) + processState.HealthCheck.Path = healthURLPath + processState.StartTimeout = 500 * time.Millisecond + + err := processState.Start(nil, nil) + Expect(err).To(MatchError(ContainSubstring("timeout"))) + + nrReceivedRequests := len(server.ReceivedRequests()) + Expect(nrReceivedRequests).To(Equal(5)) + time.Sleep(200 * time.Millisecond) + Expect(nrReceivedRequests).To(Equal(5)) + }) + }) + + Context("when the healthcheck isn't even listening", func() { + BeforeEach(func() { + server.Close() + }) + + It("returns a timeout error", func() { + processState.HealthCheck.Path = healthURLPath + processState.StartTimeout = 500 * time.Millisecond + + port, host, err := addr.Suggest("") + Expect(err).NotTo(HaveOccurred()) + + processState.HealthCheck.URL = url.URL{ + Scheme: "http", + Host: net.JoinHostPort(host, 
strconv.Itoa(port)), + } + + err = processState.Start(nil, nil) + Expect(err).To(MatchError(ContainSubstring("timeout"))) + }) + }) + + Context("when the healthcheck fails initially but succeeds eventually", func() { + BeforeEach(func() { + server.AppendHandlers( + ghttp.RespondWith(http.StatusInternalServerError, ""), + ghttp.RespondWith(http.StatusInternalServerError, ""), + ghttp.RespondWith(http.StatusInternalServerError, ""), + ghttp.RespondWith(http.StatusOK, ""), + ) + }) + + It("hits the endpoint repeatedly, and successfully starts", func() { + processState.HealthCheck.URL = getServerURL(server) + processState.HealthCheck.Path = healthURLPath + processState.StartTimeout = 20 * time.Second + + err := processState.Start(nil, nil) + Expect(err).NotTo(HaveOccurred()) + Expect(server.ReceivedRequests()).To(HaveLen(4)) + Consistently(processState.Exited).Should(BeFalse()) + }) + + Context("when the polling interval is not configured", func() { + It("uses the default interval for polling", func() { + processState.HealthCheck.URL = getServerURL(server) + processState.HealthCheck.Path = "/helathz" + processState.StartTimeout = 300 * time.Millisecond + + Expect(processState.Start(nil, nil)).To(MatchError(ContainSubstring("timeout"))) + Expect(server.ReceivedRequests()).To(HaveLen(3)) + }) + }) + + Context("when the polling interval is configured", func() { + BeforeEach(func() { + processState.HealthCheck.URL = getServerURL(server) + processState.HealthCheck.Path = healthURLPath + processState.HealthCheck.PollInterval = time.Millisecond * 150 + }) + + It("hits the endpoint in the configured interval", func() { + processState.StartTimeout = 3 * processState.HealthCheck.PollInterval + + Expect(processState.Start(nil, nil)).To(MatchError(ContainSubstring("timeout"))) + Expect(server.ReceivedRequests()).To(HaveLen(3)) + }) + }) + }) +}) + +var _ = Describe("Stop method", func() { + var ( + server *ghttp.Server + processState *State + ) + BeforeEach(func() { + server = 
ghttp.NewServer() + server.RouteToHandler("GET", healthURLPath, ghttp.RespondWith(http.StatusOK, "")) + processState = &State{ + Path: "bash", + Args: simpleBashScript, + HealthCheck: HealthCheck{ + URL: getServerURL(server), + }, + } + processState.StartTimeout = 10 * time.Second + }) + + AfterEach(func() { + server.Close() + }) + Context("when Stop() is called", func() { + BeforeEach(func() { + Expect(processState.Start(nil, nil)).To(Succeed()) + processState.StopTimeout = 10 * time.Second + }) + + It("stops the process", func() { + Expect(processState.Stop()).To(Succeed()) + }) + + Context("multiple times", func() { + It("does not error or panic on consecutive calls", func() { + stoppingTheProcess := func() { + Expect(processState.Stop()).To(Succeed()) + } + Expect(stoppingTheProcess).NotTo(Panic()) + Expect(stoppingTheProcess).NotTo(Panic()) + Expect(stoppingTheProcess).NotTo(Panic()) + }) + }) + }) + + Context("when the command cannot be stopped", func() { + It("returns a timeout error", func() { + Expect(processState.Start(nil, nil)).To(Succeed()) + processState.StopTimeout = 1 * time.Nanosecond // much shorter than the sleep in the script + + Expect(processState.Stop()).To(MatchError(ContainSubstring("timeout"))) + }) + }) + + Context("when the directory needs to be cleaned up", func() { + It("removes the directory", func() { + var err error + + Expect(processState.Start(nil, nil)).To(Succeed()) + processState.Dir, err = os.MkdirTemp("", "k8s_test_framework_") + Expect(err).NotTo(HaveOccurred()) + processState.DirNeedsCleaning = true + processState.StopTimeout = 400 * time.Millisecond + + Expect(processState.Stop()).To(Succeed()) + Expect(processState.Dir).NotTo(BeAnExistingFile()) + }) + }) +}) + +var _ = Describe("Init", func() { + Context("when all inputs are provided", func() { + It("passes them through", func() { + ps := &State{ + Dir: "/some/dir", + Path: "/some/path/to/some/bin", + StartTimeout: 20 * time.Hour, + StopTimeout: 65537 * time.Millisecond, 
+			}
+
+			Expect(ps.Init("some name")).To(Succeed())
+
+			Expect(ps.Dir).To(Equal("/some/dir"))
+			Expect(ps.DirNeedsCleaning).To(BeFalse())
+			Expect(ps.Path).To(Equal("/some/path/to/some/bin"))
+			Expect(ps.StartTimeout).To(Equal(20 * time.Hour))
+			Expect(ps.StopTimeout).To(Equal(65537 * time.Millisecond))
+		})
+	})
+
+	Context("when inputs are empty", func() {
+		It("defaults them", func() {
+			ps := &State{}
+			Expect(ps.Init("some name")).To(Succeed())
+
+			Expect(ps.Dir).To(BeADirectory())
+			Expect(os.RemoveAll(ps.Dir)).To(Succeed())
+			Expect(ps.DirNeedsCleaning).To(BeTrue())
+
+			Expect(ps.Path).NotTo(BeEmpty())
+
+			Expect(ps.StartTimeout).NotTo(BeZero())
+			Expect(ps.StopTimeout).NotTo(BeZero())
+		})
+	})
+
+	Context("when neither name nor path are provided", func() {
+		It("returns an error", func() {
+			ps := &State{}
+			Expect(ps.Init("")).To(MatchError("must have at least one of name or path"))
+		})
+	})
+})
+
+var simpleBashScript = []string{
+	"-c", "tail -f /dev/null",
+}
+
+func getServerURL(server *ghttp.Server) url.URL {
+	url, err := url.Parse(server.URL())
+	Expect(err).NotTo(HaveOccurred())
+	url.Path = healthURLPath
+	return *url
+}
diff --git a/pkg/leaderelection/doc.go b/pkg/leaderelection/doc.go
index 24654faa3c..37a9aefab5 100644
--- a/pkg/leaderelection/doc.go
+++ b/pkg/leaderelection/doc.go
@@ -15,7 +15,7 @@ limitations under the License.
 */
 
 /*
-Package leaderelection contains a constructors for a leader election resource lock.
+Package leaderelection contains a constructor for a leader election resource lock.
 This is used to ensure that multiple copies of a controller manager can be run
 with only one active set of controllers, for active-passive HA.
diff --git a/pkg/leaderelection/fake/leader_election.go b/pkg/leaderelection/fake/leader_election.go
index b0df56d8ee..ab816a19a7 100644
--- a/pkg/leaderelection/fake/leader_election.go
+++ b/pkg/leaderelection/fake/leader_election.go
@@ -17,7 +17,11 @@ limitations under the License.
package fake import ( + "context" + "encoding/json" + "errors" "os" + "sync/atomic" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -28,6 +32,19 @@ import ( "sigs.k8s.io/controller-runtime/pkg/recorder" ) +// ControllableResourceLockInterface is an interface that extends resourcelock.Interface to be +// controllable. +type ControllableResourceLockInterface interface { + resourcelock.Interface + + // BlockLeaderElection blocks the leader election process when called. It will not be unblocked + // until UnblockLeaderElection is called. + BlockLeaderElection() + + // UnblockLeaderElection unblocks the leader election. + UnblockLeaderElection() +} + // NewResourceLock creates a new ResourceLock for use in testing // leader election. func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) { @@ -38,53 +55,82 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op } id = id + "_" + string(uuid.NewUUID()) - return &ResourceLock{ + return &resourceLock{ id: id, record: resourcelock.LeaderElectionRecord{ HolderIdentity: id, - LeaseDurationSeconds: 15, + LeaseDurationSeconds: 1, AcquireTime: metav1.NewTime(time.Now()), - RenewTime: metav1.NewTime(time.Now().Add(15 * time.Second)), + RenewTime: metav1.NewTime(time.Now().Add(1 * time.Second)), LeaderTransitions: 1, }, }, nil } -// ResourceLock implements the ResourceLockInterface. +var _ ControllableResourceLockInterface = &resourceLock{} + +// resourceLock implements the ResourceLockInterface. // By default returns that the current identity holds the lock. -type ResourceLock struct { +type resourceLock struct { id string record resourcelock.LeaderElectionRecord + + blockedLeaderElection atomic.Bool } // Get implements the ResourceLockInterface. 
-func (f *ResourceLock) Get() (*resourcelock.LeaderElectionRecord, error) { - return &f.record, nil +func (f *resourceLock) Get(ctx context.Context) (*resourcelock.LeaderElectionRecord, []byte, error) { + recordBytes, err := json.Marshal(f.record) + if err != nil { + return nil, nil, err + } + return &f.record, recordBytes, nil } // Create implements the ResourceLockInterface. -func (f *ResourceLock) Create(ler resourcelock.LeaderElectionRecord) error { +func (f *resourceLock) Create(ctx context.Context, ler resourcelock.LeaderElectionRecord) error { + if f.blockedLeaderElection.Load() { + // If leader election is blocked, we do not allow creating a new record. + return errors.New("leader election is blocked, cannot create new record") + } + f.record = ler return nil } // Update implements the ResourceLockInterface. -func (f *ResourceLock) Update(ler resourcelock.LeaderElectionRecord) error { +func (f *resourceLock) Update(ctx context.Context, ler resourcelock.LeaderElectionRecord) error { + if f.blockedLeaderElection.Load() { + // If leader election is blocked, we do not allow updating records + return errors.New("leader election is blocked, cannot update record") + } + f.record = ler + return nil } // RecordEvent implements the ResourceLockInterface. -func (f *ResourceLock) RecordEvent(s string) { - return +func (f *resourceLock) RecordEvent(s string) { + } // Identity implements the ResourceLockInterface. -func (f *ResourceLock) Identity() string { +func (f *resourceLock) Identity() string { return f.id } // Describe implements the ResourceLockInterface. -func (f *ResourceLock) Describe() string { +func (f *resourceLock) Describe() string { return f.id } + +// BlockLeaderElection blocks the leader election process when called. +func (f *resourceLock) BlockLeaderElection() { + f.blockedLeaderElection.Store(true) +} + +// UnblockLeaderElection unblocks the leader election process when called. 
+func (f *resourceLock) UnblockLeaderElection() { + f.blockedLeaderElection.Store(false) +} diff --git a/pkg/leaderelection/leader_election.go b/pkg/leaderelection/leader_election.go index 9e13fd7968..6c013e7992 100644 --- a/pkg/leaderelection/leader_election.go +++ b/pkg/leaderelection/leader_election.go @@ -17,44 +17,67 @@ limitations under the License. package leaderelection import ( + "errors" "fmt" - "io/ioutil" "os" + "time" "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/client-go/kubernetes" + coordinationv1client "k8s.io/client-go/kubernetes/typed/coordination/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/leaderelection/resourcelock" + "sigs.k8s.io/controller-runtime/pkg/recorder" ) const inClusterNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" -// Options provides the required configuration to create a new resource lock +// Options provides the required configuration to create a new resource lock. type Options struct { // LeaderElection determines whether or not to use leader election when // starting the manager. LeaderElection bool + // LeaderElectionResourceLock determines which resource lock to use for leader election, + // defaults to "leases". + LeaderElectionResourceLock string + // LeaderElectionNamespace determines the namespace in which the leader - // election configmap will be created. + // election resource will be created. LeaderElectionNamespace string - // LeaderElectionID determines the name of the configmap that leader election + // LeaderElectionID determines the name of the resource that leader election // will use for holding the leader lock. LeaderElectionID string + + // RenewDeadline is the renew deadline for this leader election client. + // Must be set to ensure the resource lock has an appropriate client timeout. + // Without that, a single slow response from the API server can result + // in losing leadership. 
+ RenewDeadline time.Duration + + // LeaderLabels are an optional set of labels that will be set on the lease object + // when this replica becomes leader + LeaderLabels map[string]string } -// NewResourceLock creates a new config map resource lock for use in a leader -// election loop +// NewResourceLock creates a new resource lock for use in a leader election loop. func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, options Options) (resourcelock.Interface, error) { if !options.LeaderElection { return nil, nil } + // Default resource lock to "leases". The previous default (from v0.7.0 to v0.11.x) was configmapsleases, which was + // used to migrate from configmaps to leases. Since the default was "configmapsleases" for over a year, spanning + // five minor releases, any actively maintained operators are very likely to have a released version that uses + // "configmapsleases". Therefore defaulting to "leases" should be safe. + if options.LeaderElectionResourceLock == "" { + options.LeaderElectionResourceLock = resourcelock.LeasesResourceLock + } - // Default the LeaderElectionID + // LeaderElectionID must be provided to prevent clashes if options.LeaderElectionID == "" { - options.LeaderElectionID = "controller-leader-election-helper" + return nil, errors.New("LeaderElectionID must be configured") } // Default the namespace (if running in cluster) @@ -62,7 +85,7 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op var err error options.LeaderElectionNamespace, err = getInClusterNamespace() if err != nil { - return nil, fmt.Errorf("unable to find leader election namespace: %v", err) + return nil, fmt.Errorf("unable to find leader election namespace: %w", err) } } @@ -73,38 +96,57 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op } id = id + "_" + string(uuid.NewUUID()) - // Construct client for leader election - client, err := kubernetes.NewForConfig(config) + // Construct 
config for leader election + config = rest.AddUserAgent(config, "leader-election") + + // Timeout set for a client used to contact to Kubernetes should be lower than + // RenewDeadline to keep a single hung request from forcing a leader loss. + // Setting it to max(time.Second, RenewDeadline/2) as a reasonable heuristic. + if options.RenewDeadline != 0 { + timeout := options.RenewDeadline / 2 + if timeout < time.Second { + timeout = time.Second + } + config.Timeout = timeout + } + + // Construct clients for leader election + corev1Client, err := corev1client.NewForConfig(config) + if err != nil { + return nil, err + } + + coordinationClient, err := coordinationv1client.NewForConfig(config) if err != nil { return nil, err } - // TODO(JoelSpeed): switch to leaderelection object in 1.12 - return resourcelock.New(resourcelock.ConfigMapsResourceLock, + return resourcelock.NewWithLabels(options.LeaderElectionResourceLock, options.LeaderElectionNamespace, options.LeaderElectionID, - client.CoreV1(), - client.CoordinationV1(), + corev1Client, + coordinationClient, resourcelock.ResourceLockConfig{ Identity: id, EventRecorder: recorderProvider.GetEventRecorderFor(id), - }) + }, + options.LeaderLabels, + ) } func getInClusterNamespace() (string, error) { // Check whether the namespace file exists. // If not, we are not running in cluster so can't guess the namespace. 
- _, err := os.Stat(inClusterNamespacePath) - if os.IsNotExist(err) { + if _, err := os.Stat(inClusterNamespacePath); os.IsNotExist(err) { return "", fmt.Errorf("not running in-cluster, please specify LeaderElectionNamespace") } else if err != nil { - return "", fmt.Errorf("error checking namespace file: %v", err) + return "", fmt.Errorf("error checking namespace file: %w", err) } - // Load the namespace file and return itss content - namespace, err := ioutil.ReadFile(inClusterNamespacePath) + // Load the namespace file and return its content + namespace, err := os.ReadFile(inClusterNamespacePath) if err != nil { - return "", fmt.Errorf("error reading namespace file: %v", err) + return "", fmt.Errorf("error reading namespace file: %w", err) } return string(namespace), nil } diff --git a/pkg/log/deleg.go b/pkg/log/deleg.go index 949117f6bb..6eb551d3b6 100644 --- a/pkg/log/deleg.go +++ b/pkg/log/deleg.go @@ -25,7 +25,7 @@ import ( // loggerPromise knows how to populate a concrete logr.Logger // with options, given an actual base logger later on down the line. type loggerPromise struct { - logger *DelegatingLogger + logger *delegatingLogSink childPromises []*loggerPromise promisesLock sync.Mutex @@ -33,8 +33,7 @@ type loggerPromise struct { tags []interface{} } -// WithName provides a new Logger with the name appended -func (p *loggerPromise) WithName(l *DelegatingLogger, name string) *loggerPromise { +func (p *loggerPromise) WithName(l *delegatingLogSink, name string) *loggerPromise { res := &loggerPromise{ logger: l, name: &name, @@ -47,8 +46,8 @@ func (p *loggerPromise) WithName(l *DelegatingLogger, name string) *loggerPromis return res } -// WithValues provides a new Logger with the tags appended -func (p *loggerPromise) WithValues(l *DelegatingLogger, tags ...interface{}) *loggerPromise { +// WithValues provides a new Logger with the tags appended. 
+func (p *loggerPromise) WithValues(l *delegatingLogSink, tags ...interface{}) *loggerPromise { res := &loggerPromise{ logger: l, tags: tags, @@ -61,55 +60,124 @@ func (p *loggerPromise) WithValues(l *DelegatingLogger, tags ...interface{}) *lo return res } -// Fulfill instantiates the Logger with the provided logger -func (p *loggerPromise) Fulfill(parentLogger logr.Logger) { - var logger = parentLogger +// Fulfill instantiates the Logger with the provided logger. +func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) { + sink := parentLogSink if p.name != nil { - logger = logger.WithName(*p.name) + sink = sink.WithName(*p.name) } if p.tags != nil { - logger = logger.WithValues(p.tags...) + sink = sink.WithValues(p.tags...) } - p.logger.Logger = logger + p.logger.lock.Lock() + p.logger.logger = sink + if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok { + p.logger.logger = withCallDepth.WithCallDepth(1) + } p.logger.promise = nil + p.logger.lock.Unlock() for _, childPromise := range p.childPromises { - childPromise.Fulfill(logger) + childPromise.Fulfill(sink) } } -// DelegatingLogger is a logr.Logger that delegates to another logr.Logger. +// delegatingLogSink is a logsink that delegates to another logr.LogSink. // If the underlying promise is not nil, it registers calls to sub-loggers with // the logging factory to be populated later, and returns a new delegating // logger. It expects to have *some* logr.Logger set at all times (generally // a no-op logger before the promises are fulfilled). -type DelegatingLogger struct { - logr.Logger +type delegatingLogSink struct { + lock sync.RWMutex + logger logr.LogSink promise *loggerPromise + info logr.RuntimeInfo +} + +// Init implements logr.LogSink. +func (l *delegatingLogSink) Init(info logr.RuntimeInfo) { + eventuallyFulfillRoot() + l.lock.Lock() + defer l.lock.Unlock() + l.info = info +} + +// Enabled tests whether this Logger is enabled. 
For example, commandline +// flags might be used to set the logging verbosity and disable some info +// logs. +func (l *delegatingLogSink) Enabled(level int) bool { + eventuallyFulfillRoot() + l.lock.RLock() + defer l.lock.RUnlock() + return l.logger.Enabled(level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to +// the log line. The key/value pairs can then be used to add additional +// variable information. The key/value pairs should alternate string +// keys and arbitrary values. +func (l *delegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { + eventuallyFulfillRoot() + l.lock.RLock() + defer l.lock.RUnlock() + l.logger.Info(level, msg, keysAndValues...) +} + +// Error logs an error, with the given message and key/value pairs as context. +// It functions similarly to calling Info with the "error" named value, but may +// have unique behavior, and should be preferred for logging errors (see the +// package documentations for more information). +// +// The msg field should be used to add context to any underlying error, +// while the err field should be used to attach the actual error that +// triggered this log line, if present. +func (l *delegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { + eventuallyFulfillRoot() + l.lock.RLock() + defer l.lock.RUnlock() + l.logger.Error(err, msg, keysAndValues...) } -// WithName provides a new Logger with the name appended -func (l *DelegatingLogger) WithName(name string) logr.Logger { +// WithName provides a new Logger with the name appended. 
+func (l *delegatingLogSink) WithName(name string) logr.LogSink { + eventuallyFulfillRoot() + l.lock.RLock() + defer l.lock.RUnlock() + if l.promise == nil { - return l.Logger.WithName(name) + sink := l.logger.WithName(name) + if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok { + sink = withCallDepth.WithCallDepth(-1) + } + return sink } - res := &DelegatingLogger{Logger: l.Logger} + res := &delegatingLogSink{logger: l.logger} promise := l.promise.WithName(res, name) res.promise = promise return res } -// WithValues provides a new Logger with the tags appended -func (l *DelegatingLogger) WithValues(tags ...interface{}) logr.Logger { +// WithValues provides a new Logger with the tags appended. +func (l *delegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { + eventuallyFulfillRoot() + l.lock.RLock() + defer l.lock.RUnlock() + if l.promise == nil { - return l.Logger.WithValues(tags...) + sink := l.logger.WithValues(tags...) + if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok { + sink = withCallDepth.WithCallDepth(-1) + } + return sink } - res := &DelegatingLogger{Logger: l.Logger} + res := &delegatingLogSink{logger: l.logger} promise := l.promise.WithValues(res, tags...) res.promise = promise @@ -119,17 +187,20 @@ func (l *DelegatingLogger) WithValues(tags ...interface{}) logr.Logger { // Fulfill switches the logger over to use the actual logger // provided, instead of the temporary initial one, if this method // has not been previously called. -func (l *DelegatingLogger) Fulfill(actual logr.Logger) { +func (l *delegatingLogSink) Fulfill(actual logr.LogSink) { + if actual == nil { + actual = NullLogSink{} + } if l.promise != nil { l.promise.Fulfill(actual) } } -// NewDelegatingLogger constructs a new DelegatingLogger which uses -// the given logger before it's promise is fulfilled. 
-func NewDelegatingLogger(initial logr.Logger) *DelegatingLogger { - l := &DelegatingLogger{ - Logger: initial, +// newDelegatingLogSink constructs a new DelegatingLogSink which uses +// the given logger before its promise is fulfilled. +func newDelegatingLogSink(initial logr.LogSink) *delegatingLogSink { + l := &delegatingLogSink{ + logger: initial, promise: &loggerPromise{promisesLock: sync.Mutex{}}, } l.promise.logger = l diff --git a/pkg/log/log.go b/pkg/log/log.go index 128e6542ea..ade21d6fb5 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -17,7 +17,7 @@ limitations under the License. // Package log contains utilities for fetching a new logger // when one is not already available. // -// The Log Handle +// # The Log Handle // // This package contains a root logr.Logger Log. It may be used to // get a handle to whatever the root logging implementation is. By @@ -25,24 +25,81 @@ limitations under the License. // to loggers. When the implementation is set using SetLogger, these // "promises" will be converted over to real loggers. // -// Logr +// # Logr // // All logging in controller-runtime is structured, using a set of interfaces // defined by a package called logr -// (https://godoc.org/github.com/go-logr/logr). The sub-package zap provides +// (https://pkg.go.dev/github.com/go-logr/logr). The sub-package zap provides // helpers for setting up logr backed by Zap (go.uber.org/zap). package log import ( + "bytes" + "context" + "fmt" + "os" + "runtime/debug" + "sync/atomic" + "time" + "github.com/go-logr/logr" ) // SetLogger sets a concrete logging implementation for all deferred Loggers. 
func SetLogger(l logr.Logger) { - Log.Fulfill(l) + logFullfilled.Store(true) + rootLog.Fulfill(l.GetSink()) +} + +func eventuallyFulfillRoot() { + if logFullfilled.Load() { + return + } + if time.Since(rootLogCreated).Seconds() >= 30 { + if logFullfilled.CompareAndSwap(false, true) { + stack := debug.Stack() + stackLines := bytes.Count(stack, []byte{'\n'}) + sep := []byte{'\n', '\t', '>', ' ', ' '} + + fmt.Fprintf(os.Stderr, + "[controller-runtime] log.SetLogger(...) was never called; logs will not be displayed.\nDetected at:%s%s", sep, + // prefix every line, so it's clear this is a stack trace related to the above message + bytes.Replace(stack, []byte{'\n'}, sep, stackLines-1), + ) + SetLogger(logr.New(NullLogSink{})) + } + } } +var ( + logFullfilled atomic.Bool +) + // Log is the base logger used by kubebuilder. It delegates -// to another logr.Logger. You *must* call SetLogger to -// get any actual logging. -var Log = NewDelegatingLogger(NullLogger{}) +// to another logr.Logger. You *must* call SetLogger to +// get any actual logging. If SetLogger is not called within +// the first 30 seconds of a binary's lifetime, it will get +// set to a NullLogSink. +var ( + rootLog, rootLogCreated = func() (*delegatingLogSink, time.Time) { + return newDelegatingLogSink(NullLogSink{}), time.Now() + }() + Log = logr.New(rootLog) +) + +// FromContext returns a logger with predefined values from a context.Context. +func FromContext(ctx context.Context, keysAndValues ...interface{}) logr.Logger { + log := Log + if ctx != nil { + if logger, err := logr.FromContext(ctx); err == nil { + log = logger + } + } + return log.WithValues(keysAndValues...) +} + +// IntoContext takes a context and sets the logger as one of its values. +// Use FromContext function to retrieve the logger. 
+func IntoContext(ctx context.Context, log logr.Logger) context.Context { + return logr.NewContext(ctx, log) +} diff --git a/pkg/log/log_suite_test.go b/pkg/log/log_suite_test.go index 3fb749caca..f0e349aa86 100644 --- a/pkg/log/log_suite_test.go +++ b/pkg/log/log_suite_test.go @@ -19,12 +19,11 @@ package log import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" ) func TestSource(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Log Suite", []Reporter{printer.NewlineReporter{}}) + RunSpecs(t, "Log Suite") } diff --git a/pkg/log/log_test.go b/pkg/log/log_test.go index fa5ff23bca..404a859e4d 100644 --- a/pkg/log/log_test.go +++ b/pkg/log/log_test.go @@ -17,12 +17,16 @@ limitations under the License. package log import ( + "errors" + "github.com/go-logr/logr" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) -// logInfo is the information for a particular fakeLogger message +var _ logr.LogSink = &delegatingLogSink{} + +// logInfo is the information for a particular fakeLogger message. type logInfo struct { name []string tags []interface{} @@ -44,7 +48,10 @@ type fakeLogger struct { root *fakeLoggerRoot } -func (f *fakeLogger) WithName(name string) logr.Logger { +func (f *fakeLogger) Init(info logr.RuntimeInfo) { +} + +func (f *fakeLogger) WithName(name string) logr.LogSink { names := append([]string(nil), f.name...) names = append(names, name) return &fakeLogger{ @@ -54,7 +61,7 @@ func (f *fakeLogger) WithName(name string) logr.Logger { } } -func (f *fakeLogger) WithValues(vals ...interface{}) logr.Logger { +func (f *fakeLogger) WithValues(vals ...interface{}) logr.LogSink { tags := append([]interface{}(nil), f.tags...) tags = append(tags, vals...) 
return &fakeLogger{ @@ -75,7 +82,7 @@ func (f *fakeLogger) Error(err error, msg string, vals ...interface{}) { }) } -func (f *fakeLogger) Info(msg string, vals ...interface{}) { +func (f *fakeLogger) Info(level int, msg string, vals ...interface{}) { tags := append([]interface{}(nil), f.tags...) tags = append(tags, vals...) f.root.messages = append(f.root.messages, logInfo{ @@ -85,8 +92,7 @@ func (f *fakeLogger) Info(msg string, vals ...interface{}) { }) } -func (f *fakeLogger) Enabled() bool { return true } -func (f *fakeLogger) V(lvl int) logr.InfoLogger { return f } +func (f *fakeLogger) Enabled(level int) bool { return true } var _ = Describe("logging", func() { @@ -98,7 +104,7 @@ var _ = Describe("logging", func() { By("actually setting the logger") logger := &fakeLogger{root: &fakeLoggerRoot{}} - SetLogger(logger) + SetLogger(logr.New(logger)) By("grabbing another sub-logger and logging to both loggers") l2 := Log.WithName("runtimeLog").WithValues("newtag", "newvalue2") @@ -116,24 +122,24 @@ var _ = Describe("logging", func() { Describe("lazy logger initialization", func() { var ( root *fakeLoggerRoot - baseLog logr.Logger - delegLog *DelegatingLogger + baseLog logr.LogSink + delegLog *delegatingLogSink ) BeforeEach(func() { root = &fakeLoggerRoot{} baseLog = &fakeLogger{root: root} - delegLog = NewDelegatingLogger(NullLogger{}) + delegLog = newDelegatingLogSink(NullLogSink{}) }) It("should delegate with name", func() { By("asking for a logger with a name before fulfill, and logging") - befFulfill1 := delegLog.WithName("before-fulfill") + befFulfill1 := logr.New(delegLog).WithName("before-fulfill") befFulfill2 := befFulfill1.WithName("two") befFulfill1.Info("before fulfill") By("logging on the base logger before fulfill") - delegLog.Info("before fulfill base") + logr.New(delegLog).Info("before fulfill base") By("ensuring that no messages were actually recorded") Expect(root.messages).To(BeEmpty()) @@ -149,7 +155,7 @@ var _ = Describe("logging", func() { 
befFulfill1.WithName("after-from-before").Info("after 3") By("logging with new loggers") - delegLog.WithName("after-fulfill").Info("after 4") + logr.New(delegLog).WithName("after-fulfill").Info("after 4") By("ensuring that the messages are appropriately named") Expect(root.messages).To(ConsistOf( @@ -160,14 +166,80 @@ var _ = Describe("logging", func() { )) }) + // This test in itself will always succeed, a failure will be indicated by the + // race detector going off + It("should be threadsafe", func() { + fulfillDone := make(chan struct{}) + withNameDone := make(chan struct{}) + withValuesDone := make(chan struct{}) + grandChildDone := make(chan struct{}) + logEnabledDone := make(chan struct{}) + logInfoDone := make(chan struct{}) + logErrorDone := make(chan struct{}) + logVDone := make(chan struct{}) + + // Constructing the child in the goroutine does not reliably + // trigger the race detector + child := logr.New(delegLog).WithName("child") + go func() { + defer GinkgoRecover() + delegLog.Fulfill(NullLogSink{}) + close(fulfillDone) + }() + go func() { + defer GinkgoRecover() + delegLog.WithName("with-name") + close(withNameDone) + }() + go func() { + defer GinkgoRecover() + delegLog.WithValues("key", "with-value") + close(withValuesDone) + }() + go func() { + defer GinkgoRecover() + child.WithValues("key", "grandchild") + close(grandChildDone) + }() + go func() { + defer GinkgoRecover() + logr.New(delegLog).Enabled() + close(logEnabledDone) + }() + go func() { + defer GinkgoRecover() + logr.New(delegLog).Info("hello world") + close(logInfoDone) + }() + go func() { + defer GinkgoRecover() + delegLog.Error(errors.New("err"), "hello world") + close(logErrorDone) + }() + go func() { + defer GinkgoRecover() + logr.New(delegLog).V(1) + close(logVDone) + }() + + <-fulfillDone + <-withNameDone + <-withValuesDone + <-grandChildDone + <-logEnabledDone + <-logInfoDone + <-logErrorDone + <-logVDone + }) + It("should delegate with tags", func() { By("asking for a logger 
with a name before fulfill, and logging") - befFulfill1 := delegLog.WithValues("tag1", "val1") + befFulfill1 := logr.New(delegLog).WithValues("tag1", "val1") befFulfill2 := befFulfill1.WithValues("tag2", "val2") befFulfill1.Info("before fulfill") By("logging on the base logger before fulfill") - delegLog.Info("before fulfill base") + logr.New(delegLog).Info("before fulfill base") By("ensuring that no messages were actually recorded") Expect(root.messages).To(BeEmpty()) @@ -183,7 +255,7 @@ var _ = Describe("logging", func() { befFulfill1.WithValues("tag3", "val3").Info("after 3") By("logging with new loggers") - delegLog.WithValues("tag3", "val3").Info("after 4") + logr.New(delegLog).WithValues("tag3", "val3").Info("after 4") By("ensuring that the messages are appropriately named") Expect(root.messages).To(ConsistOf( @@ -199,13 +271,13 @@ var _ = Describe("logging", func() { delegLog.Fulfill(baseLog) By("logging a bit") - delegLog.Info("msg 1") + logr.New(delegLog).Info("msg 1") By("fulfilling with a new logger") delegLog.Fulfill(&fakeLogger{}) By("logging some more") - delegLog.Info("msg 2") + logr.New(delegLog).Info("msg 2") By("checking that all log messages are present") Expect(root.messages).To(ConsistOf( @@ -213,5 +285,53 @@ var _ = Describe("logging", func() { logInfo{msg: "msg 2"}, )) }) + + It("should handle nil sinks", func() { + By("fulfilling once") + delegLog.Fulfill(logr.Discard().GetSink()) + By("grabbing a sub-logger and logging") + l1 := logr.New(delegLog).WithName("nilsink").WithValues("newtag", "newvalue2") + l1.Info("test") + }) }) + + Describe("logger from context", func() { + It("should return default logger when context is empty", func(ctx SpecContext) { + gotLog := FromContext(ctx) + Expect(gotLog).To(Not(BeNil())) + }) + + It("should return existing logger", func(specCtx SpecContext) { + root := &fakeLoggerRoot{} + baseLog := &fakeLogger{root: root} + + wantLog := logr.New(baseLog).WithName("my-logger") + ctx := IntoContext(specCtx, wantLog) 
+ + gotLog := FromContext(ctx) + Expect(gotLog).To(Not(BeNil())) + + gotLog.Info("test message") + Expect(root.messages).To(ConsistOf( + logInfo{name: []string{"my-logger"}, msg: "test message"}, + )) + }) + + It("should have added key-values", func(specCtx SpecContext) { + root := &fakeLoggerRoot{} + baseLog := &fakeLogger{root: root} + + wantLog := logr.New(baseLog).WithName("my-logger") + ctx := IntoContext(specCtx, wantLog) + + gotLog := FromContext(ctx, "tag1", "value1") + Expect(gotLog).To(Not(BeNil())) + + gotLog.Info("test message") + Expect(root.messages).To(ConsistOf( + logInfo{name: []string{"my-logger"}, tags: []interface{}{"tag1", "value1"}, msg: "test message"}, + )) + }) + }) + }) diff --git a/pkg/log/null.go b/pkg/log/null.go index 4c56f3427b..f3e81074fe 100644 --- a/pkg/log/null.go +++ b/pkg/log/null.go @@ -24,37 +24,36 @@ import ( // but avoids accidentally adding the testing flags to // all binaries. -// NullLogger is a logr.Logger that does nothing. -type NullLogger struct{} +// NullLogSink is a logr.Logger that does nothing. +type NullLogSink struct{} -var _ logr.Logger = NullLogger{} +var _ logr.LogSink = NullLogSink{} -// Info implements logr.InfoLogger -func (NullLogger) Info(_ string, _ ...interface{}) { +// Init implements logr.LogSink. +func (log NullLogSink) Init(logr.RuntimeInfo) { +} + +// Info implements logr.InfoLogger. +func (NullLogSink) Info(_ int, _ string, _ ...interface{}) { // Do nothing. } -// Enabled implements logr.InfoLogger -func (NullLogger) Enabled() bool { +// Enabled implements logr.InfoLogger. +func (NullLogSink) Enabled(level int) bool { return false } -// Error implements logr.Logger -func (NullLogger) Error(_ error, _ string, _ ...interface{}) { +// Error implements logr.Logger. +func (NullLogSink) Error(_ error, _ string, _ ...interface{}) { // Do nothing. 
} -// V implements logr.Logger -func (log NullLogger) V(_ int) logr.InfoLogger { - return log -} - -// WithName implements logr.Logger -func (log NullLogger) WithName(_ string) logr.Logger { +// WithName implements logr.Logger. +func (log NullLogSink) WithName(_ string) logr.LogSink { return log } -// WithValues implements logr.Logger -func (log NullLogger) WithValues(_ ...interface{}) logr.Logger { +// WithValues implements logr.Logger. +func (log NullLogSink) WithValues(_ ...interface{}) logr.LogSink { return log } diff --git a/pkg/log/warning_handler.go b/pkg/log/warning_handler.go new file mode 100644 index 0000000000..413b56d2e4 --- /dev/null +++ b/pkg/log/warning_handler.go @@ -0,0 +1,75 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "context" + "sync" +) + +// KubeAPIWarningLoggerOptions controls the behavior +// of a rest.WarningHandlerWithContext constructed using NewKubeAPIWarningLogger(). +type KubeAPIWarningLoggerOptions struct { + // Deduplicate indicates a given warning message should only be written once. + // Setting this to true in a long-running process handling many warnings can + // result in increased memory use. + Deduplicate bool +} + +// KubeAPIWarningLogger is a wrapper around +// a provided logr.Logger that implements the +// rest.WarningHandlerWithContext interface. 
+type KubeAPIWarningLogger struct { + // opts contain options controlling warning output + opts KubeAPIWarningLoggerOptions + // writtenLock guards written + writtenLock sync.Mutex + // used to keep track of already logged messages + // and help in de-duplication. + written map[string]struct{} +} + +// HandleWarningHeaderWithContext handles logging for responses from API server that are +// warnings with code being 299 and uses a logr.Logger from context for its logging purposes. +func (l *KubeAPIWarningLogger) HandleWarningHeaderWithContext(ctx context.Context, code int, _ string, message string) { + log := FromContext(ctx) + + if code != 299 || len(message) == 0 { + return + } + + if l.opts.Deduplicate { + l.writtenLock.Lock() + defer l.writtenLock.Unlock() + + if _, alreadyLogged := l.written[message]; alreadyLogged { + return + } + l.written[message] = struct{}{} + } + log.Info(message) +} + +// NewKubeAPIWarningLogger returns an implementation of rest.WarningHandlerWithContext that logs warnings +// with code = 299 to the logger passed into HandleWarningHeaderWithContext via the context. +func NewKubeAPIWarningLogger(opts KubeAPIWarningLoggerOptions) *KubeAPIWarningLogger { + h := &KubeAPIWarningLogger{opts: opts} + if opts.Deduplicate { + h.written = map[string]struct{}{} + } + return h +} diff --git a/pkg/log/zap/flags.go b/pkg/log/zap/flags.go new file mode 100644 index 0000000000..4ebac57dcb --- /dev/null +++ b/pkg/log/zap/flags.go @@ -0,0 +1,167 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package zap + +import ( + "flag" + "fmt" + "strconv" + "strings" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var levelStrings = map[string]zapcore.Level{ + "debug": zap.DebugLevel, + "info": zap.InfoLevel, + "error": zap.ErrorLevel, + "panic": zap.PanicLevel, +} + +var stackLevelStrings = map[string]zapcore.Level{ + "info": zap.InfoLevel, + "error": zap.ErrorLevel, + "panic": zap.PanicLevel, +} + +type encoderFlag struct { + setFunc func(NewEncoderFunc) + value string +} + +var _ flag.Value = &encoderFlag{} + +func (ev *encoderFlag) String() string { + return ev.value +} + +func (ev *encoderFlag) Type() string { + return "encoder" +} + +func (ev *encoderFlag) Set(flagValue string) error { + val := strings.ToLower(flagValue) + switch val { + case "json": + ev.setFunc(newJSONEncoder) + case "console": + ev.setFunc(newConsoleEncoder) + default: + return fmt.Errorf("invalid encoder value \"%s\"", flagValue) + } + ev.value = flagValue + return nil +} + +type levelFlag struct { + setFunc func(zapcore.LevelEnabler) + value string +} + +var _ flag.Value = &levelFlag{} + +func (ev *levelFlag) Set(flagValue string) error { + level, validLevel := levelStrings[strings.ToLower(flagValue)] + if !validLevel { + logLevel, err := strconv.Atoi(flagValue) + if err != nil { + return fmt.Errorf("invalid log level \"%s\"", flagValue) + } + if logLevel > 0 { + intLevel := -1 * logLevel + ev.setFunc(zap.NewAtomicLevelAt(zapcore.Level(int8(intLevel)))) + } else { + return fmt.Errorf("invalid log level \"%s\"", flagValue) + } + } else { + ev.setFunc(zap.NewAtomicLevelAt(level)) + } + ev.value = flagValue + return nil +} + +func (ev *levelFlag) String() string { + return ev.value +} + +func (ev *levelFlag) Type() string { + return "level" +} + +type stackTraceFlag struct { + setFunc func(zapcore.LevelEnabler) + value string +} + +var _ flag.Value = &stackTraceFlag{} + +func 
(ev *stackTraceFlag) Set(flagValue string) error { + level, validLevel := stackLevelStrings[strings.ToLower(flagValue)] + if !validLevel { + return fmt.Errorf("invalid stacktrace level \"%s\"", flagValue) + } + ev.setFunc(zap.NewAtomicLevelAt(level)) + ev.value = flagValue + return nil +} + +func (ev *stackTraceFlag) String() string { + return ev.value +} + +func (ev *stackTraceFlag) Type() string { + return "level" +} + +type timeEncodingFlag struct { + setFunc func(zapcore.TimeEncoder) + value string +} + +var _ flag.Value = &timeEncodingFlag{} + +func (ev *timeEncodingFlag) String() string { + return ev.value +} + +func (ev *timeEncodingFlag) Type() string { + return "time-encoding" +} + +func (ev *timeEncodingFlag) Set(flagValue string) error { + val := strings.ToLower(flagValue) + switch val { + case "rfc3339nano": + ev.setFunc(zapcore.RFC3339NanoTimeEncoder) + case "rfc3339": + ev.setFunc(zapcore.RFC3339TimeEncoder) + case "iso8601": + ev.setFunc(zapcore.ISO8601TimeEncoder) + case "millis": + ev.setFunc(zapcore.EpochMillisTimeEncoder) + case "nanos": + ev.setFunc(zapcore.EpochNanosTimeEncoder) + case "epoch": + ev.setFunc(zapcore.EpochTimeEncoder) + default: + return fmt.Errorf("invalid time-encoding value \"%s\"", flagValue) + } + + ev.value = flagValue + return nil +} diff --git a/pkg/log/zap/kube_helpers.go b/pkg/log/zap/kube_helpers.go index 2c0d88386d..c47fe6646f 100644 --- a/pkg/log/zap/kube_helpers.go +++ b/pkg/log/zap/kube_helpers.go @@ -18,12 +18,12 @@ package zap import ( "fmt" + "reflect" "go.uber.org/zap/buffer" "go.uber.org/zap/zapcore" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" ) // KubeAwareEncoder is a Kubernetes-aware Zap Encoder. 
@@ -40,32 +40,21 @@ type KubeAwareEncoder struct { Verbose bool } -// namespacedNameWrapper is a zapcore.ObjectMarshaler for Kubernetes NamespacedName -type namespacedNameWrapper struct { - types.NamespacedName -} - -func (w namespacedNameWrapper) MarshalLogObject(enc zapcore.ObjectEncoder) error { - if w.Namespace != "" { - enc.AddString("namespace", w.Namespace) - } - - enc.AddString("name", w.Name) - - return nil -} - // kubeObjectWrapper is a zapcore.ObjectMarshaler for Kubernetes objects. type kubeObjectWrapper struct { obj runtime.Object } -// MarshalLogObject implements zapcore.ObjectMarshaler +// MarshalLogObject implements zapcore.ObjectMarshaler. func (w kubeObjectWrapper) MarshalLogObject(enc zapcore.ObjectEncoder) error { // TODO(directxman12): log kind and apiversion if not set explicitly (common case) // -- needs an a scheme to convert to the GVK. - gvk := w.obj.GetObjectKind().GroupVersionKind() - if gvk.Version != "" { + + if reflect.ValueOf(w.obj).IsNil() { + return fmt.Errorf("got nil for runtime.Object") + } + + if gvk := w.obj.GetObjectKind().GroupVersionKind(); gvk.Version != "" { enc.AddString("apiVersion", gvk.GroupVersion().String()) enc.AddString("kind", gvk.Kind) } @@ -75,8 +64,7 @@ func (w kubeObjectWrapper) MarshalLogObject(enc zapcore.ObjectEncoder) error { return fmt.Errorf("got runtime.Object without object metadata: %v", w.obj) } - ns := objMeta.GetNamespace() - if ns != "" { + if ns := objMeta.GetNamespace(); ns != "" { enc.AddString("namespace", ns) } enc.AddString("name", objMeta.GetName()) @@ -86,14 +74,15 @@ func (w kubeObjectWrapper) MarshalLogObject(enc zapcore.ObjectEncoder) error { // NB(directxman12): can't just override AddReflected, since the encoder calls AddReflected on itself directly -// Clone implements zapcore.Encoder +// Clone implements zapcore.Encoder. 
func (k *KubeAwareEncoder) Clone() zapcore.Encoder { return &KubeAwareEncoder{ Encoder: k.Encoder.Clone(), + Verbose: k.Verbose, } } -// EncodeEntry implements zapcore.Encoder +// EncodeEntry implements zapcore.Encoder. func (k *KubeAwareEncoder) EncodeEntry(entry zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) { if k.Verbose { // Kubernetes objects implement fmt.Stringer, so if we @@ -105,7 +94,9 @@ func (k *KubeAwareEncoder) EncodeEntry(entry zapcore.Entry, fields []zapcore.Fie // intercept stringer fields that happen to be Kubernetes runtime.Object or // types.NamespacedName values (Kubernetes runtime.Objects commonly // implement String, apparently). - if field.Type == zapcore.StringerType { + // *unstructured.Unstructured does NOT implement fmt.Striger interface. + // We have handle it specially. + if field.Type == zapcore.StringerType || field.Type == zapcore.ReflectType { switch val := field.Interface.(type) { case runtime.Object: fields[i] = zapcore.Field{ @@ -113,12 +104,6 @@ func (k *KubeAwareEncoder) EncodeEntry(entry zapcore.Entry, fields []zapcore.Fie Key: field.Key, Interface: kubeObjectWrapper{obj: val}, } - case types.NamespacedName: - fields[i] = zapcore.Field{ - Type: zapcore.ObjectMarshalerType, - Key: field.Key, - Interface: namespacedNameWrapper{NamespacedName: val}, - } } } } diff --git a/pkg/log/zap/zap.go b/pkg/log/zap/zap.go index 5812e85bfb..607b6680d5 100644 --- a/pkg/log/zap/zap.go +++ b/pkg/log/zap/zap.go @@ -19,6 +19,7 @@ limitations under the License. package zap import ( + "flag" "io" "os" "time" @@ -29,45 +30,274 @@ import ( "go.uber.org/zap/zapcore" ) -// Logger is a Logger implementation. -// If development is true, a Zap development config will be used -// (stacktraces on warnings, no sampling), otherwise a Zap production -// config will be used (stacktraces on errors, sampling). 
-func Logger(development bool) logr.Logger { - return LoggerTo(os.Stderr, development) +// EncoderConfigOption is a function that can modify a `zapcore.EncoderConfig`. +type EncoderConfigOption func(*zapcore.EncoderConfig) + +// NewEncoderFunc is a function that creates an Encoder using the provided EncoderConfigOptions. +type NewEncoderFunc func(...EncoderConfigOption) zapcore.Encoder + +// New returns a brand new Logger configured with Opts. It +// uses KubeAwareEncoder which adds Type information and +// Namespace/Name to the log. +func New(opts ...Opts) logr.Logger { + return zapr.NewLogger(NewRaw(opts...)) +} + +// Opts allows to manipulate Options. +type Opts func(*Options) + +// UseDevMode sets the logger to use (or not use) development mode (more +// human-readable output, extra stack traces and logging information, etc). +// See Options.Development. +func UseDevMode(enabled bool) Opts { + return func(o *Options) { + o.Development = enabled + } } -// LoggerTo returns a new Logger implementation using Zap which logs -// to the given destination, instead of stderr. It otherwise behaves like -// ZapLogger. -func LoggerTo(destWriter io.Writer, development bool) logr.Logger { - return zapr.NewLogger(RawLoggerTo(destWriter, development)) +// WriteTo configures the logger to write to the given io.Writer, instead of standard error. +// See Options.DestWriter. 
+func WriteTo(out io.Writer) Opts { + return func(o *Options) { + o.DestWriter = out + } } -// RawLoggerTo returns a new zap.Logger configured with KubeAwareEncoder -// which logs to a given destination -func RawLoggerTo(destWriter io.Writer, development bool, opts ...zap.Option) *zap.Logger { - // this basically mimics NewConfig, but with a custom sink - sink := zapcore.AddSync(destWriter) - - var enc zapcore.Encoder - var lvl zap.AtomicLevel - if development { - encCfg := zap.NewDevelopmentEncoderConfig() - enc = zapcore.NewConsoleEncoder(encCfg) - lvl = zap.NewAtomicLevelAt(zap.DebugLevel) - opts = append(opts, zap.Development(), zap.AddStacktrace(zap.ErrorLevel)) +// Encoder configures how the logger will encode the output e.g JSON or console. +// See Options.Encoder. +func Encoder(encoder zapcore.Encoder) func(o *Options) { + return func(o *Options) { + o.Encoder = encoder + } +} + +// JSONEncoder configures the logger to use a JSON Encoder. +func JSONEncoder(opts ...EncoderConfigOption) func(o *Options) { + return func(o *Options) { + o.Encoder = newJSONEncoder(opts...) + } +} + +func newJSONEncoder(opts ...EncoderConfigOption) zapcore.Encoder { + encoderConfig := zap.NewProductionEncoderConfig() + for _, opt := range opts { + opt(&encoderConfig) + } + return zapcore.NewJSONEncoder(encoderConfig) +} + +// ConsoleEncoder configures the logger to use a Console encoder. +func ConsoleEncoder(opts ...EncoderConfigOption) func(o *Options) { + return func(o *Options) { + o.Encoder = newConsoleEncoder(opts...) + } +} + +func newConsoleEncoder(opts ...EncoderConfigOption) zapcore.Encoder { + encoderConfig := zap.NewDevelopmentEncoderConfig() + for _, opt := range opts { + opt(&encoderConfig) + } + return zapcore.NewConsoleEncoder(encoderConfig) +} + +// Level sets Options.Level, which configures the minimum enabled logging level e.g Debug, Info. +// A zap log level should be multiplied by -1 to get the logr verbosity. 
+// For example, to get logr verbosity of 3, pass zapcore.Level(-3) to this Opts. +// See https://pkg.go.dev/github.com/go-logr/zapr for how zap level relates to logr verbosity. +func Level(level zapcore.LevelEnabler) func(o *Options) { + return func(o *Options) { + o.Level = level + } +} + +// StacktraceLevel sets Options.StacktraceLevel, which configures the logger to record a stack trace +// for all messages at or above a given level. +// See the Level Opts for the relationship of zap log level to logr verbosity. +func StacktraceLevel(stacktraceLevel zapcore.LevelEnabler) func(o *Options) { + return func(o *Options) { + o.StacktraceLevel = stacktraceLevel + } +} + +// RawZapOpts allows appending arbitrary zap.Options to configure the underlying zap logger. +// See Options.ZapOpts. +func RawZapOpts(zapOpts ...zap.Option) func(o *Options) { + return func(o *Options) { + o.ZapOpts = append(o.ZapOpts, zapOpts...) + } +} + +// Options contains all possible settings. +type Options struct { + // Development configures the logger to use a Zap development config + // (stacktraces on warnings, no sampling), otherwise a Zap production + // config will be used (stacktraces on errors, sampling). + Development bool + // Encoder configures how Zap will encode the output. Defaults to + // console when Development is true and JSON otherwise + Encoder zapcore.Encoder + // EncoderConfigOptions can modify the EncoderConfig needed to initialize an Encoder. + // See https://pkg.go.dev/go.uber.org/zap/zapcore#EncoderConfig for the list of options + // that can be configured. + // Note that the EncoderConfigOptions are not applied when the Encoder option is already set. + EncoderConfigOptions []EncoderConfigOption + // NewEncoder configures Encoder using the provided EncoderConfigOptions. + // Note that the NewEncoder function is not used when the Encoder option is already set. + NewEncoder NewEncoderFunc + // DestWriter controls the destination of the log output. 
Defaults to + // os.Stderr. + DestWriter io.Writer + // Level configures the verbosity of the logging. + // Defaults to Debug when Development is true and Info otherwise. + // A zap log level should be multiplied by -1 to get the logr verbosity. + // For example, to get logr verbosity of 3, set this field to zapcore.Level(-3). + // See https://pkg.go.dev/github.com/go-logr/zapr for how zap level relates to logr verbosity. + Level zapcore.LevelEnabler + // StacktraceLevel is the level at and above which stacktraces will + // be recorded for all messages. Defaults to Warn when Development + // is true and Error otherwise. + // See Level for the relationship of zap log level to logr verbosity. + StacktraceLevel zapcore.LevelEnabler + // ZapOpts allows passing arbitrary zap.Options to configure on the + // underlying Zap logger. + ZapOpts []zap.Option + // TimeEncoder specifies the encoder for the timestamps in log messages. + // Defaults to RFC3339TimeEncoder. + TimeEncoder zapcore.TimeEncoder +} + +// addDefaults adds defaults to the Options. 
+func (o *Options) addDefaults() { + if o.DestWriter == nil { + o.DestWriter = os.Stderr + } + + if o.Development { + if o.NewEncoder == nil { + o.NewEncoder = newConsoleEncoder + } + if o.Level == nil { + lvl := zap.NewAtomicLevelAt(zap.DebugLevel) + o.Level = &lvl + } + if o.StacktraceLevel == nil { + lvl := zap.NewAtomicLevelAt(zap.WarnLevel) + o.StacktraceLevel = &lvl + } + o.ZapOpts = append(o.ZapOpts, zap.Development()) } else { - encCfg := zap.NewProductionEncoderConfig() - enc = zapcore.NewJSONEncoder(encCfg) - lvl = zap.NewAtomicLevelAt(zap.InfoLevel) - opts = append(opts, zap.AddStacktrace(zap.WarnLevel), - zap.WrapCore(func(core zapcore.Core) zapcore.Core { - return zapcore.NewSampler(core, time.Second, 100, 100) - })) - } - opts = append(opts, zap.AddCallerSkip(1), zap.ErrorOutput(sink)) - log := zap.New(zapcore.NewCore(&KubeAwareEncoder{Encoder: enc, Verbose: development}, sink, lvl)) - log = log.WithOptions(opts...) + if o.NewEncoder == nil { + o.NewEncoder = newJSONEncoder + } + if o.Level == nil { + lvl := zap.NewAtomicLevelAt(zap.InfoLevel) + o.Level = &lvl + } + if o.StacktraceLevel == nil { + lvl := zap.NewAtomicLevelAt(zap.ErrorLevel) + o.StacktraceLevel = &lvl + } + // Disable sampling for increased Debug levels. Otherwise, this will + // cause index out of bounds errors in the sampling code. + if !o.Level.Enabled(zapcore.Level(-2)) { + o.ZapOpts = append(o.ZapOpts, + zap.WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewSamplerWithOptions(core, time.Second, 100, 100) + })) + } + } + + if o.TimeEncoder == nil { + o.TimeEncoder = zapcore.RFC3339TimeEncoder + } + f := func(ecfg *zapcore.EncoderConfig) { + ecfg.EncodeTime = o.TimeEncoder + } + // prepend instead of append it in case someone adds a time encoder option in it + o.EncoderConfigOptions = append([]EncoderConfigOption{f}, o.EncoderConfigOptions...) + + if o.Encoder == nil { + o.Encoder = o.NewEncoder(o.EncoderConfigOptions...) 
+ } + o.ZapOpts = append(o.ZapOpts, zap.AddStacktrace(o.StacktraceLevel)) +} + +// NewRaw returns a new zap.Logger configured with the passed Opts +// or their defaults. It uses KubeAwareEncoder which adds Type +// information and Namespace/Name to the log. +func NewRaw(opts ...Opts) *zap.Logger { + o := &Options{} + for _, opt := range opts { + opt(o) + } + o.addDefaults() + + // this basically mimics NewConfig, but with a custom sink + sink := zapcore.AddSync(o.DestWriter) + + o.ZapOpts = append(o.ZapOpts, zap.ErrorOutput(sink)) + log := zap.New(zapcore.NewCore(&KubeAwareEncoder{Encoder: o.Encoder, Verbose: o.Development}, sink, o.Level)) + log = log.WithOptions(o.ZapOpts...) return log } + +// BindFlags will parse the given flagset for zap option flags and set the log options accordingly: +// - zap-devel: +// Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn) +// Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) +// - zap-encoder: Zap log encoding (one of 'json' or 'console') +// - zap-log-level: Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', 'panic' +// or any integer value > 0 which corresponds to custom debug levels of increasing verbosity"). +// - zap-stacktrace-level: Zap Level at and above which stacktraces are captured (one of 'info', 'error' or 'panic') +// - zap-time-encoding: Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'), +// Defaults to 'epoch'. +func (o *Options) BindFlags(fs *flag.FlagSet) { + // Set Development mode value + fs.BoolVar(&o.Development, "zap-devel", o.Development, + "Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). 
"+ + "Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error)") + + // Set Encoder value + var encVal encoderFlag + encVal.setFunc = func(fromFlag NewEncoderFunc) { + o.NewEncoder = fromFlag + } + fs.Var(&encVal, "zap-encoder", "Zap log encoding (one of 'json' or 'console')") + + // Set the Log Level + var levelVal levelFlag + levelVal.setFunc = func(fromFlag zapcore.LevelEnabler) { + o.Level = fromFlag + } + fs.Var(&levelVal, "zap-log-level", + "Zap Level to configure the verbosity of logging. Can be one of 'debug', 'info', 'error', 'panic'"+ + "or any integer value > 0 which corresponds to custom debug levels of increasing verbosity") + + // Set the StrackTrace Level + var stackVal stackTraceFlag + stackVal.setFunc = func(fromFlag zapcore.LevelEnabler) { + o.StacktraceLevel = fromFlag + } + fs.Var(&stackVal, "zap-stacktrace-level", + "Zap Level at and above which stacktraces are captured (one of 'info', 'error', 'panic').") + + // Set the time encoding + var timeEncoderVal timeEncodingFlag + timeEncoderVal.setFunc = func(fromFlag zapcore.TimeEncoder) { + o.TimeEncoder = fromFlag + } + fs.Var(&timeEncoderVal, "zap-time-encoding", "Zap time encoding (one of 'epoch', 'millis', 'nano', 'iso8601', 'rfc3339' or 'rfc3339nano'). Defaults to 'epoch'.") +} + +// UseFlagOptions configures the logger to use the Options set by parsing zap option flags from the CLI. +// +// opts := zap.Options{} +// opts.BindFlags(flag.CommandLine) +// flag.Parse() +// log := zap.New(zap.UseFlagOptions(&opts)) +func UseFlagOptions(in *Options) Opts { + return func(o *Options) { + *o = *in + } +} diff --git a/pkg/log/zap/zap_suite_test.go b/pkg/log/zap/zap_suite_test.go index 711ea0ae17..d7a7f22866 100644 --- a/pkg/log/zap/zap_suite_test.go +++ b/pkg/log/zap/zap_suite_test.go @@ -19,12 +19,11 @@ package zap import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" ) func TestSource(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Zap Log Suite", []Reporter{printer.NewlineReporter{}}) + RunSpecs(t, "Zap Log Suite") } diff --git a/pkg/log/zap/zap_test.go b/pkg/log/zap/zap_test.go index 8cbd8818d5..3e80113a65 100644 --- a/pkg/log/zap/zap_test.go +++ b/pkg/log/zap/zap_test.go @@ -19,34 +19,39 @@ package zap import ( "bytes" "encoding/json" - "io/ioutil" + "flag" + "os" + "reflect" "github.com/go-logr/logr" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - kapi "k8s.io/api/core/v1" + "go.uber.org/zap/zapcore" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" ) -// testStringer is a fmt.Stringer +// testStringer is a fmt.Stringer. type testStringer struct{} func (testStringer) String() string { return "value" } -// fakeSyncWriter is a fake zap.SyncerWriter that lets us test if sync was called +// fakeSyncWriter is a fake zap.SyncerWriter that lets us test if sync was called. type fakeSyncWriter bool func (w *fakeSyncWriter) Write(p []byte) (int, error) { return len(p), nil } + func (w *fakeSyncWriter) Sync() error { *w = true return nil } -// logInfo is the information for a particular fakeLogger message +// logInfo is the information for a particular fakeLogger message. type logInfo struct { name []string tags []interface{} @@ -58,6 +63,8 @@ type fakeLoggerRoot struct { messages []logInfo } +var _ logr.LogSink = &fakeLogger{} + // fakeLogger is a fake implementation of logr.Logger that records // messages, tags, and names, // just records the name. 
@@ -68,7 +75,10 @@ type fakeLogger struct { root *fakeLoggerRoot } -func (f *fakeLogger) WithName(name string) logr.Logger { +func (f *fakeLogger) Init(info logr.RuntimeInfo) { +} + +func (f *fakeLogger) WithName(name string) logr.LogSink { names := append([]string(nil), f.name...) names = append(names, name) return &fakeLogger{ @@ -78,7 +88,7 @@ func (f *fakeLogger) WithName(name string) logr.Logger { } } -func (f *fakeLogger) WithValues(vals ...interface{}) logr.Logger { +func (f *fakeLogger) WithValues(vals ...interface{}) logr.LogSink { tags := append([]interface{}(nil), f.tags...) tags = append(tags, vals...) return &fakeLogger{ @@ -99,7 +109,7 @@ func (f *fakeLogger) Error(err error, msg string, vals ...interface{}) { }) } -func (f *fakeLogger) Info(msg string, vals ...interface{}) { +func (f *fakeLogger) Info(level int, msg string, vals ...interface{}) { tags := append([]interface{}(nil), f.tags...) tags = append(tags, vals...) f.root.messages = append(f.root.messages, logInfo{ @@ -109,123 +119,491 @@ func (f *fakeLogger) Info(msg string, vals ...interface{}) { }) } -func (f *fakeLogger) Enabled() bool { return true } -func (f *fakeLogger) V(lvl int) logr.InfoLogger { return f } +func (f *fakeLogger) Enabled(level int) bool { return true } +func (f *fakeLogger) V(lvl int) logr.LogSink { return f } -var _ = Describe("Zap logger setup", func() { - Context("with the default output", func() { - It("shouldn't fail when setting up production", func() { - Expect(Logger(false)).NotTo(BeNil()) - }) +var _ = Describe("Zap options setup", func() { + var opts *Options - It("shouldn't fail when setting up development", func() { - Expect(Logger(true)).NotTo(BeNil()) - }) + BeforeEach(func() { + opts = &Options{} }) - Context("with custom non-sync output", func() { - It("shouldn't fail when setting up production", func() { - Expect(LoggerTo(ioutil.Discard, false)).NotTo(BeNil()) - }) + It("should enable development mode", func() { + UseDevMode(true)(opts) + 
Expect(opts.Development).To(BeTrue()) + }) - It("shouldn't fail when setting up development", func() { - Expect(LoggerTo(ioutil.Discard, true)).NotTo(BeNil()) - }) + It("should disable development mode", func() { + UseDevMode(false)(opts) + Expect(opts.Development).To(BeFalse()) }) + It("should set a custom writer", func() { + var w fakeSyncWriter + WriteTo(&w)(opts) + Expect(opts.DestWriter).To(Equal(&w)) + }) +}) + +var _ = Describe("Zap logger setup", func() { Context("when logging kubernetes objects", func() { var logOut *bytes.Buffer var logger logr.Logger - BeforeEach(func() { - logOut = new(bytes.Buffer) - By("setting up the logger") - // use production settings (false) to get just json output - logger = LoggerTo(logOut, false) + defineTests := func() { + It("should log a standard namespaced Kubernetes object name and namespace", func() { + pod := &corev1.Pod{} + pod.Name = "some-pod" + pod.Namespace = "some-ns" + logger.Info("here's a kubernetes object", "thing", pod) + + outRaw := logOut.Bytes() + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + + Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ + "name": pod.Name, + "namespace": pod.Namespace, + })) + }) + + It("should work fine with normal stringers", func() { + logger.Info("here's a non-kubernetes stringer", "thing", testStringer{}) + outRaw := logOut.Bytes() + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + + Expect(res).To(HaveKeyWithValue("thing", "value")) + }) + + It("should log a standard non-namespaced Kubernetes object name", func() { + node := &corev1.Node{} + node.Name = "some-node" + logger.Info("here's a kubernetes object", "thing", node) + + outRaw := logOut.Bytes() + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + + Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ + "name": node.Name, + })) + }) + + It("should log a standard Kubernetes object's 
kind, if set", func() { + node := &corev1.Node{} + node.Name = "some-node" + node.APIVersion = "v1" + node.Kind = "Node" + logger.Info("here's a kubernetes object", "thing", node) + + outRaw := logOut.Bytes() + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + + Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ + "name": node.Name, + "apiVersion": "v1", + "kind": "Node", + })) + }) + + It("should log a standard non-namespaced NamespacedName name", func() { + name := types.NamespacedName{Name: "some-node"} + logger.Info("here's a kubernetes object", "thing", name) + + outRaw := logOut.Bytes() + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + + Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ + "name": name.Name, + })) + }) + + It("should log an unstructured Kubernetes object", func() { + pod := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "some-pod", + "namespace": "some-ns", + }, + }, + } + logger.Info("here's a kubernetes object", "thing", pod) + + outRaw := logOut.Bytes() + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + + Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ + "name": "some-pod", + "namespace": "some-ns", + })) + }) + + It("should log a standard namespaced NamespacedName name and namespace", func() { + name := types.NamespacedName{Name: "some-pod", Namespace: "some-ns"} + logger.Info("here's a kubernetes object", "thing", name) + + outRaw := logOut.Bytes() + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + + Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ + "name": name.Name, + "namespace": name.Namespace, + })) + }) + + It("should not panic with nil obj", func() { + var pod *corev1.Pod + logger.Info("here's a kubernetes object", "thing", pod) + + outRaw := logOut.Bytes() + 
Expect(string(outRaw)).Should(ContainSubstring("got nil for runtime.Object")) + }) + } + + Context("with logger created using New", func() { + BeforeEach(func() { + logOut = new(bytes.Buffer) + By("setting up the logger") + // use production settings (false) to get just json output + logger = New(WriteTo(logOut), UseDevMode(false)) + }) + defineTests() }) + }) +}) - It("should log a standard namespaced Kubernetes object name and namespace", func() { - pod := &kapi.Pod{} - pod.Name = "some-pod" - pod.Namespace = "some-ns" - logger.Info("here's a kubernetes object", "thing", pod) +var _ = Describe("Zap log level flag options setup", func() { + var ( + fromFlags Options + fs flag.FlagSet + logInfoLevel0 = "info text" + logDebugLevel1 = "debug 1 text" + logDebugLevel2 = "debug 2 text" + logDebugLevel3 = "debug 3 text" + ) + + BeforeEach(func() { + fromFlags = Options{} + fs = *flag.NewFlagSet(os.Args[0], flag.ExitOnError) + }) + + Context("with zap-log-level options provided", func() { + It("Should output logs for info and debug zap-log-level.", func() { + args := []string{"--zap-log-level=debug"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + logOut := new(bytes.Buffer) + + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.V(0).Info(logInfoLevel0) + logger.V(1).Info(logDebugLevel1) outRaw := logOut.Bytes() - res := map[string]interface{}{} - Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) - Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ - "name": pod.Name, - "namespace": pod.Namespace, - })) + Expect(string(outRaw)).Should(ContainSubstring(logInfoLevel0)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel1)) }) - It("should work fine with normal stringers", func() { - logger.Info("here's a non-kubernetes stringer", "thing", testStringer{}) + It("Should output only error logs, otherwise empty logs", func() { + args := []string{"--zap-log-level=error"} + fromFlags.BindFlags(&fs) 
+ err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + + logOut := new(bytes.Buffer) + + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.V(0).Info(logInfoLevel0) + logger.V(1).Info(logDebugLevel1) + outRaw := logOut.Bytes() - res := map[string]interface{}{} - Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) - Expect(res).To(HaveKeyWithValue("thing", "value")) + Expect(outRaw).To(BeEmpty()) }) - It("should log a standard non-namespaced Kubernetes object name", func() { - node := &kapi.Node{} - node.Name = "some-node" - logger.Info("here's a kubernetes object", "thing", node) + It("Should output only panic logs, otherwise empty logs", func() { + args := []string{"--zap-log-level=panic"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + + logOut := new(bytes.Buffer) + + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.V(0).Info(logInfoLevel0) + logger.V(1).Info(logDebugLevel1) + logger.V(2).Info(logDebugLevel2) outRaw := logOut.Bytes() - res := map[string]interface{}{} - Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) - Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ - "name": node.Name, - })) + Expect(outRaw).To(BeEmpty()) }) + }) + + Context("with zap-log-level with increased verbosity.", func() { + It("Should output debug and info log, with default production mode.", func() { + args := []string{"--zap-log-level=1"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + logOut := new(bytes.Buffer) - It("should log a standard Kubernetes object's kind, if set", func() { - node := &kapi.Node{} - node.Name = "some-node" - node.APIVersion = "v1" - node.Kind = "Node" - logger.Info("here's a kubernetes object", "thing", node) + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.V(0).Info(logInfoLevel0) + logger.V(1).Info(logDebugLevel1) outRaw := logOut.Bytes() - res := map[string]interface{}{} - 
Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) - Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ - "name": node.Name, - "apiVersion": "v1", - "kind": "Node", - })) + Expect(string(outRaw)).Should(ContainSubstring(logInfoLevel0)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel1)) }) - It("should log a standard non-namespaced NamespacedName name", func() { - name := types.NamespacedName{Name: "some-node"} - logger.Info("here's a kubernetes object", "thing", name) + It("Should output info and debug logs, with development mode.", func() { + args := []string{"--zap-log-level=1", "--zap-devel=true"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + logOut := new(bytes.Buffer) + + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.V(0).Info(logInfoLevel0) + logger.V(1).Info(logDebugLevel1) outRaw := logOut.Bytes() - res := map[string]interface{}{} - Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) - Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ - "name": name.Name, - })) + Expect(string(outRaw)).Should(ContainSubstring(logInfoLevel0)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel1)) }) - It("should log a standard namespaced NamespacedName name and namespace", func() { - name := types.NamespacedName{Name: "some-pod", Namespace: "some-ns"} - logger.Info("here's a kubernetes object", "thing", name) + It("Should output info, and debug logs with increased verbosity, and with development mode set to true.", func() { + args := []string{"--zap-log-level=3", "--zap-devel=false"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + logOut := new(bytes.Buffer) + + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.V(0).Info(logInfoLevel0) + logger.V(1).Info(logDebugLevel1) + logger.V(2).Info(logDebugLevel2) + logger.V(3).Info(logDebugLevel3) + + outRaw := logOut.Bytes() + + 
Expect(string(outRaw)).Should(ContainSubstring(logInfoLevel0)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel1)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel2)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel3)) + }) + It("Should output info, and debug logs with increased verbosity, and with production mode set to true.", func() { + args := []string{"--zap-log-level=3", "--zap-devel=true"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + logOut := new(bytes.Buffer) + + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.V(0).Info(logInfoLevel0) + logger.V(1).Info(logDebugLevel1) + logger.V(2).Info(logDebugLevel2) + logger.V(3).Info(logDebugLevel3) outRaw := logOut.Bytes() + + Expect(string(outRaw)).Should(ContainSubstring(logInfoLevel0)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel1)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel2)) + Expect(string(outRaw)).Should(ContainSubstring(logDebugLevel3)) + }) + }) + + Context("with zap-stacktrace-level options provided", func() { + It("Should output stacktrace at info level, with development mode set to true.", func() { + args := []string{"--zap-stacktrace-level=info", "--zap-devel=true"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + out := Options{} + UseFlagOptions(&fromFlags)(&out) + + Expect(out.StacktraceLevel.Enabled(zapcore.InfoLevel)).To(BeTrue()) + }) + + It("Should output stacktrace at error level, with development mode set to true.", func() { + args := []string{"--zap-stacktrace-level=error", "--zap-devel=true"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + out := Options{} + UseFlagOptions(&fromFlags)(&out) + + Expect(out.StacktraceLevel.Enabled(zapcore.ErrorLevel)).To(BeTrue()) + }) + + It("Should output stacktrace at panic level, with development mode set to true.", func() { + args := 
[]string{"--zap-stacktrace-level=panic", "--zap-devel=true"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + out := Options{} + UseFlagOptions(&fromFlags)(&out) + + Expect(out.StacktraceLevel.Enabled(zapcore.PanicLevel)).To(BeTrue()) + Expect(out.StacktraceLevel.Enabled(zapcore.ErrorLevel)).To(BeFalse()) + Expect(out.StacktraceLevel.Enabled(zapcore.InfoLevel)).To(BeFalse()) + }) + }) + + Context("with only -zap-devel flag provided", func() { + It("Should set dev=true.", func() { + args := []string{"--zap-devel=true"} + fromFlags.BindFlags(&fs) + if err := fs.Parse(args); err != nil { + Expect(err).ToNot(HaveOccurred()) + } + out := Options{} + UseFlagOptions(&fromFlags)(&out) + + Expect(out.Development).To(BeTrue()) + Expect(out.Encoder).To(BeNil()) + Expect(out.Level).To(BeNil()) + Expect(out.StacktraceLevel).To(BeNil()) + Expect(out.EncoderConfigOptions).To(BeNil()) + }) + It("Should set dev=false", func() { + args := []string{"--zap-devel=false"} + fromFlags.BindFlags(&fs) + if err := fs.Parse(args); err != nil { + Expect(err).ToNot(HaveOccurred()) + } + out := Options{} + UseFlagOptions(&fromFlags)(&out) + + Expect(out.Development).To(BeFalse()) + Expect(out.Encoder).To(BeNil()) + Expect(out.Level).To(BeNil()) + Expect(out.StacktraceLevel).To(BeNil()) + Expect(out.EncoderConfigOptions).To(BeNil()) + }) + }) + + Context("with zap-time-encoding flag provided", func() { + It("Should set time encoder in options", func() { + args := []string{"--zap-time-encoding=rfc3339"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + + opt := Options{} + UseFlagOptions(&fromFlags)(&opt) + opt.addDefaults() + + optVal := reflect.ValueOf(opt.TimeEncoder) + expVal := reflect.ValueOf(zapcore.RFC3339TimeEncoder) + + Expect(optVal.Pointer()).To(Equal(expVal.Pointer())) + }) + + It("Should default to 'rfc3339' time encoding", func() { + args := []string{""} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) 
+ Expect(err).ToNot(HaveOccurred()) + + opt := Options{} + UseFlagOptions(&fromFlags)(&opt) + opt.addDefaults() + + optVal := reflect.ValueOf(opt.TimeEncoder) + expVal := reflect.ValueOf(zapcore.RFC3339TimeEncoder) + + Expect(optVal.Pointer()).To(Equal(expVal.Pointer())) + }) + + It("Should return an error message, with unknown time-encoding", func() { + fs = *flag.NewFlagSet(os.Args[0], flag.ContinueOnError) + args := []string{"--zap-time-encoding=foobar"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).To(HaveOccurred()) + }) + + It("Should propagate time encoder to logger", func() { + // zaps ISO8601TimeEncoder uses 2006-01-02T15:04:05.000Z0700 as pattern for iso8601 encoding + iso8601Pattern := `^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3}([-+][0-9]{4}|Z)` + + args := []string{"--zap-time-encoding=iso8601"} + fromFlags.BindFlags(&fs) + err := fs.Parse(args) + Expect(err).ToNot(HaveOccurred()) + logOut := new(bytes.Buffer) + + logger := New(UseFlagOptions(&fromFlags), WriteTo(logOut)) + logger.Info("This is a test message") + + outRaw := logOut.Bytes() + + res := map[string]interface{}{} + Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + Expect(res["ts"]).Should(MatchRegexp(iso8601Pattern)) + }) + }) + + Context("with encoder options provided programmatically", func() { + It("Should set JSON Encoder, with given Millis TimeEncoder option, and MessageKey", func() { + logOut := new(bytes.Buffer) + f := func(ec *zapcore.EncoderConfig) { + ec.MessageKey = "MillisTimeFormat" + } + opts := func(o *Options) { + o.EncoderConfigOptions = append(o.EncoderConfigOptions, f) + } + log := New(UseDevMode(false), WriteTo(logOut), opts) + log.Info("This is a test message") + outRaw := logOut.Bytes() + // Assert for JSON Encoder res := map[string]interface{}{} Expect(json.Unmarshal(outRaw, &res)).To(Succeed()) + // Assert for MessageKey + Expect(string(outRaw)).Should(ContainSubstring("MillisTimeFormat")) + }) - 
Expect(res).To(HaveKeyWithValue("thing", map[string]interface{}{ - "name": name.Name, - "namespace": name.Namespace, - })) + Context("using Level()", func() { + var logOut *bytes.Buffer + + BeforeEach(func() { + logOut = new(bytes.Buffer) + }) + + It("logs with negative logr level", func() { + By("setting up the logger") + logger := New(WriteTo(logOut), Level(zapcore.Level(-3))) + logger.V(3).Info("test 3") // Should be logged + Expect(logOut.String()).To(ContainSubstring(`"msg":"test 3"`)) + logOut.Truncate(0) + logger.V(1).Info("test 1") // Should be logged + Expect(logOut.String()).To(ContainSubstring(`"msg":"test 1"`)) + logOut.Truncate(0) + logger.V(4).Info("test 4") // Should not be logged + Expect(logOut.String()).To(BeEmpty()) + logger.V(-3).Info("test -3") + Expect(logOut.String()).To(ContainSubstring("test -3")) + }) + It("does not log with positive logr level", func() { + By("setting up the logger") + logger := New(WriteTo(logOut), Level(zapcore.Level(1))) + logger.V(1).Info("test 1") + Expect(logOut.String()).To(BeEmpty()) + logger.V(3).Info("test 3") + Expect(logOut.String()).To(BeEmpty()) + }) }) }) }) diff --git a/pkg/manager/example_test.go b/pkg/manager/example_test.go index c24a2bc55d..02cfa11946 100644 --- a/pkg/manager/example_test.go +++ b/pkg/manager/example_test.go @@ -17,8 +17,10 @@ limitations under the License. package manager_test import ( + "context" "os" + "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client/config" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -49,7 +51,7 @@ func ExampleNew() { } // This example creates a new Manager that has a cache scoped to a list of namespaces. 
-func ExampleNew_multinamespaceCache() { +func ExampleNew_limitToNamespaces() { cfg, err := config.GetConfig() if err != nil { log.Error(err, "unable to get kubeconfig") @@ -57,8 +59,14 @@ func ExampleNew_multinamespaceCache() { } mgr, err := manager.New(cfg, manager.Options{ - NewCache: cache.MultiNamespacedCacheBuilder([]string{"namespace1", "namespace2"}), - }) + NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) { + opts.DefaultNamespaces = map[string]cache.Config{ + "namespace1": {}, + "namespace2": {}, + } + return cache.New(config, opts) + }}, + ) if err != nil { log.Error(err, "unable to set up manager") os.Exit(1) @@ -68,7 +76,7 @@ func ExampleNew_multinamespaceCache() { // This example adds a Runnable for the Manager to Start. func ExampleManager_add() { - err := mgr.Add(manager.RunnableFunc(func(<-chan struct{}) error { + err := mgr.Add(manager.RunnableFunc(func(context.Context) error { // Do something return nil })) @@ -80,8 +88,7 @@ func ExampleManager_add() { // This example starts a Manager that has had Runnables added. 
func ExampleManager_start() { - err := mgr.Start(signals.SetupSignalHandler()) - if err != nil { + if err := mgr.Start(signals.SetupSignalHandler()); err != nil { log.Error(err, "unable start the manager") os.Exit(1) } diff --git a/pkg/manager/internal.go b/pkg/manager/internal.go index 9852d0cf40..a2c3e5324d 100644 --- a/pkg/manager/internal.go +++ b/pkg/manager/internal.go @@ -18,344 +18,626 @@ package manager import ( "context" + "errors" "fmt" "net" "net/http" + "net/http/pprof" "sync" + "sync/atomic" "time" - "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/rest" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/metrics" - "sigs.k8s.io/controller-runtime/pkg/recorder" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/config" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" ) const ( - // Values taken from: https://github.com/kubernetes/apiserver/blob/master/pkg/apis/config/v1alpha1/defaults.go - defaultLeaseDuration = 15 * time.Second - defaultRenewDeadline = 10 * time.Second - defaultRetryPeriod = 2 * time.Second + // Values taken from: 
https://github.com/kubernetes/component-base/blob/master/config/v1alpha1/defaults.go + defaultLeaseDuration = 15 * time.Second + defaultRenewDeadline = 10 * time.Second + defaultRetryPeriod = 2 * time.Second + defaultGracefulShutdownPeriod = 30 * time.Second + + defaultReadinessEndpoint = "/readyz" + defaultLivenessEndpoint = "/healthz" ) -var log = logf.RuntimeLog.WithName("manager") +var _ Runnable = &controllerManager{} type controllerManager struct { - // config is the rest.config used to talk to the apiserver. Required. - config *rest.Config - - // scheme is the scheme injected into Controllers, EventHandlers, Sources and Predicates. Defaults - // to scheme.scheme. - scheme *runtime.Scheme - - // leaderElectionRunnables is the set of Controllers that the controllerManager injects deps into and Starts. - // These Runnables are managed by lead election. - leaderElectionRunnables []Runnable - // nonLeaderElectionRunnables is the set of webhook servers that the controllerManager injects deps into and Starts. - // These Runnables will not be blocked by lead election. - nonLeaderElectionRunnables []Runnable - - cache cache.Cache - - // TODO(directxman12): Provide an escape hatch to get individual indexers - // client is the client injected into Controllers (and EventHandlers, Sources and Predicates). - client client.Client + sync.Mutex + started bool - // apiReader is the reader that will make requests to the api server and not the cache. - apiReader client.Reader + stopProcedureEngaged *int64 + errChan chan error + runnables *runnables - // fieldIndexes knows how to add field indexes over the Cache used by this controller, - // which can later be consumed via field selectors from the injected client. - fieldIndexes client.FieldIndexer + // cluster holds a variety of methods to interact with a cluster. Required. 
+ cluster cluster.Cluster // recorderProvider is used to generate event recorders that will be injected into Controllers // (and EventHandlers, Sources and Predicates). - recorderProvider recorder.Provider + recorderProvider *intrec.Provider // resourceLock forms the basis for leader election resourceLock resourcelock.Interface - // mapper is used to map resources to kind, and map kind and version. - mapper meta.RESTMapper + // leaderElectionReleaseOnCancel defines if the manager should step back from the leader lease + // on shutdown + leaderElectionReleaseOnCancel bool - // metricsListener is used to serve prometheus metrics - metricsListener net.Listener + // metricsServer is used to serve prometheus metrics + metricsServer metricsserver.Server - mu sync.Mutex - started bool - errChan chan error + // healthProbeListener is used to serve liveness probe + healthProbeListener net.Listener + + // Readiness probe endpoint name + readinessEndpointName string + + // Liveness probe endpoint name + livenessEndpointName string - // internalStop is the stop channel *actually* used by everything involved - // with the manager as a stop channel, so that we can pass a stop channel - // to things that need it off the bat (like the Channel source). It can - // be closed via `internalStopper` (by being the same underlying channel). - internalStop <-chan struct{} + // Readyz probe handler + readyzHandler *healthz.Handler - // internalStopper is the write side of the internal stop channel, allowing us to close it. - // It and `internalStop` should point to the same channel. - internalStopper chan<- struct{} + // Healthz probe handler + healthzHandler *healthz.Handler - startCache func(stop <-chan struct{}) error + // pprofListener is used to serve pprof + pprofListener net.Listener - // port is the port that the webhook server serves at. - port int - // host is the hostname that the webhook server binds to. - host string + // controllerConfig are the global controller options. 
+ controllerConfig config.Controller - webhookServer *webhook.Server + // Logger is the logger that should be used by this manager. + // If none is set, it defaults to log.Log global logger. + logger logr.Logger + // leaderElectionStopped is an internal channel used to signal the stopping procedure that the + // LeaderElection.Run(...) function has returned and the shutdown can proceed. + leaderElectionStopped chan struct{} + + // leaderElectionCancel is used to cancel the leader election. It is distinct from internalStopper, + // because for safety reasons we need to os.Exit() when we lose the leader election, meaning that + // it must be deferred until after gracefulShutdown is done. + leaderElectionCancel context.CancelFunc + + // elected is closed when this manager becomes the leader of a group of + // managers, either because it won a leader election or because no leader + // election was configured. + elected chan struct{} + + webhookServer webhook.Server + // webhookServerOnce will be called in GetWebhookServer() to optionally initialize + // webhookServer if unset, and Add() it to controllerManager. + webhookServerOnce sync.Once + + // leaderElectionID is the name of the resource that leader election + // will use for holding the leader lock. + leaderElectionID string // leaseDuration is the duration that non-leader candidates will // wait to force acquire leadership. leaseDuration time.Duration - // renewDeadline is the duration that the acting master will retry + // renewDeadline is the duration that the acting controlplane will retry // refreshing leadership before giving up. renewDeadline time.Duration // retryPeriod is the duration the LeaderElector clients should wait // between tries of actions. retryPeriod time.Duration + + // gracefulShutdownTimeout is the duration given to runnable to stop + // before the manager actually returns on stop. 
+ gracefulShutdownTimeout time.Duration + + // onStoppedLeading is callled when the leader election lease is lost. + // It can be overridden for tests. + onStoppedLeading func() + + // shutdownCtx is the context that can be used during shutdown. It will be cancelled + // after the gracefulShutdownTimeout ended. It must not be accessed before internalStop + // is closed because it will be nil. + shutdownCtx context.Context + + internalCtx context.Context + internalCancel context.CancelFunc + + // internalProceduresStop channel is used internally to the manager when coordinating + // the proper shutdown of servers. This channel is also used for dependency injection. + internalProceduresStop chan struct{} +} + +type hasCache interface { + Runnable + GetCache() cache.Cache } // Add sets dependencies on i, and adds it to the list of Runnables to start. func (cm *controllerManager) Add(r Runnable) error { - cm.mu.Lock() - defer cm.mu.Unlock() + cm.Lock() + defer cm.Unlock() + return cm.add(r) +} - // Set dependencies on the object - if err := cm.SetFields(r); err != nil { +func (cm *controllerManager) add(r Runnable) error { + return cm.runnables.Add(r) +} + +// AddMetricsServerExtraHandler adds extra handler served on path to the http server that serves metrics. 
+func (cm *controllerManager) AddMetricsServerExtraHandler(path string, handler http.Handler) error { + cm.Lock() + defer cm.Unlock() + if cm.started { + return fmt.Errorf("unable to add new metrics handler because metrics endpoint has already been created") + } + if cm.metricsServer == nil { + cm.GetLogger().Info("warn: metrics server is currently disabled, registering extra handler will be ignored", "path", path) + return nil + } + if err := cm.metricsServer.AddExtraHandler(path, handler); err != nil { return err } + cm.logger.V(2).Info("Registering metrics http server extra handler", "path", path) + return nil +} - // Add the runnable to the leader election or the non-leaderelection list - if leRunnable, ok := r.(LeaderElectionRunnable); ok && !leRunnable.NeedLeaderElection() { - cm.nonLeaderElectionRunnables = append(cm.nonLeaderElectionRunnables, r) - } else { - cm.leaderElectionRunnables = append(cm.leaderElectionRunnables, r) - } +// AddHealthzCheck allows you to add Healthz checker. 
+func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error { + cm.Lock() + defer cm.Unlock() if cm.started { - // If already started, start the controller - go func() { - cm.errChan <- r.Start(cm.internalStop) - }() + return fmt.Errorf("unable to add new checker because healthz endpoint has already been created") } + if cm.healthzHandler == nil { + cm.healthzHandler = &healthz.Handler{Checks: map[string]healthz.Checker{}} + } + + cm.healthzHandler.Checks[name] = check return nil } -func (cm *controllerManager) SetFields(i interface{}) error { - if _, err := inject.ConfigInto(cm.config, i); err != nil { - return err - } - if _, err := inject.ClientInto(cm.client, i); err != nil { - return err - } - if _, err := inject.APIReaderInto(cm.apiReader, i); err != nil { - return err - } - if _, err := inject.SchemeInto(cm.scheme, i); err != nil { - return err - } - if _, err := inject.CacheInto(cm.cache, i); err != nil { - return err - } - if _, err := inject.InjectorInto(cm.SetFields, i); err != nil { - return err - } - if _, err := inject.StopChannelInto(cm.internalStop, i); err != nil { - return err +// AddReadyzCheck allows you to add Readyz checker. 
+func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) error { + cm.Lock() + defer cm.Unlock() + + if cm.started { + return fmt.Errorf("unable to add new checker because healthz endpoint has already been created") } - if _, err := inject.MapperInto(cm.mapper, i); err != nil { - return err + + if cm.readyzHandler == nil { + cm.readyzHandler = &healthz.Handler{Checks: map[string]healthz.Checker{}} } + + cm.readyzHandler.Checks[name] = check return nil } +func (cm *controllerManager) GetHTTPClient() *http.Client { + return cm.cluster.GetHTTPClient() +} + func (cm *controllerManager) GetConfig() *rest.Config { - return cm.config + return cm.cluster.GetConfig() } func (cm *controllerManager) GetClient() client.Client { - return cm.client + return cm.cluster.GetClient() } func (cm *controllerManager) GetScheme() *runtime.Scheme { - return cm.scheme + return cm.cluster.GetScheme() } func (cm *controllerManager) GetFieldIndexer() client.FieldIndexer { - return cm.fieldIndexes + return cm.cluster.GetFieldIndexer() } func (cm *controllerManager) GetCache() cache.Cache { - return cm.cache + return cm.cluster.GetCache() } func (cm *controllerManager) GetEventRecorderFor(name string) record.EventRecorder { - return cm.recorderProvider.GetEventRecorderFor(name) + return cm.cluster.GetEventRecorderFor(name) } func (cm *controllerManager) GetRESTMapper() meta.RESTMapper { - return cm.mapper + return cm.cluster.GetRESTMapper() } func (cm *controllerManager) GetAPIReader() client.Reader { - return cm.apiReader + return cm.cluster.GetAPIReader() } -func (cm *controllerManager) GetWebhookServer() *webhook.Server { - if cm.webhookServer == nil { - cm.webhookServer = &webhook.Server{ - Port: cm.port, - Host: cm.host, +func (cm *controllerManager) GetWebhookServer() webhook.Server { + cm.webhookServerOnce.Do(func() { + if cm.webhookServer == nil { + panic("webhook should not be nil") } - cm.webhookServer.Register("/convert", &conversion.Webhook{}) if err := 
cm.Add(cm.webhookServer); err != nil { - panic("unable to add webhookServer to the controller manager") + panic(fmt.Sprintf("unable to add webhook server to the controller manager: %s", err)) } - } + }) return cm.webhookServer } -func (cm *controllerManager) serveMetrics(stop <-chan struct{}) { - handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{ - ErrorHandling: promhttp.HTTPErrorOnError, +func (cm *controllerManager) GetLogger() logr.Logger { + return cm.logger +} + +func (cm *controllerManager) GetControllerOptions() config.Controller { + return cm.controllerConfig +} + +func (cm *controllerManager) addHealthProbeServer() error { + mux := http.NewServeMux() + srv := httpserver.New(mux) + + if cm.readyzHandler != nil { + mux.Handle(cm.readinessEndpointName, http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler)) + // Append '/' suffix to handle subpaths + mux.Handle(cm.readinessEndpointName+"/", http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler)) + } + if cm.healthzHandler != nil { + mux.Handle(cm.livenessEndpointName, http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler)) + // Append '/' suffix to handle subpaths + mux.Handle(cm.livenessEndpointName+"/", http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler)) + } + + return cm.add(&Server{ + Name: "health probe", + Server: srv, + Listener: cm.healthProbeListener, }) - // TODO(JoelSpeed): Use existing Kubernetes machinery for serving metrics +} + +func (cm *controllerManager) addPprofServer() error { mux := http.NewServeMux() - mux.Handle("/metrics", handler) - server := http.Server{ - Handler: mux, + srv := httpserver.New(mux) + + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + + return cm.add(&Server{ + Name: "pprof", + Server: srv, + Listener: 
cm.pprofListener, + }) +} + +// Start starts the manager and waits indefinitely. +// There is only two ways to have start return: +// An error has occurred during in one of the internal operations, +// such as leader election, cache start, webhooks, and so on. +// Or, the context is cancelled. +func (cm *controllerManager) Start(ctx context.Context) (err error) { + cm.Lock() + if cm.started { + cm.Unlock() + return errors.New("manager already started") } - // Run the server - go func() { - if err := server.Serve(cm.metricsListener); err != nil && err != http.ErrServerClosed { - cm.errChan <- err + cm.started = true + + var ready bool + defer func() { + // Only unlock the manager if we haven't reached + // the internal readiness condition. + if !ready { + cm.Unlock() } }() - // Shutdown the server when stop is closed - select { - case <-stop: - if err := server.Shutdown(context.Background()); err != nil { - cm.errChan <- err + // Initialize the internal context. + cm.internalCtx, cm.internalCancel = context.WithCancel(ctx) + + // Leader elector must be created before defer that contains engageStopProcedure function + // https://github.com/kubernetes-sigs/controller-runtime/issues/2873 + var leaderElector *leaderelection.LeaderElector + if cm.resourceLock != nil { + leaderElector, err = cm.initLeaderElector() + if err != nil { + return fmt.Errorf("failed during initialization leader election process: %w", err) } } -} -func (cm *controllerManager) Start(stop <-chan struct{}) error { - // join the passed-in stop channel as an upstream feeding into cm.internalStopper - defer close(cm.internalStopper) + // This chan indicates that stop is complete, in other words all runnables have returned or timeout on stop request + stopComplete := make(chan struct{}) + defer close(stopComplete) + // This must be deferred after closing stopComplete, otherwise we deadlock. 
+ defer func() { + // https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/gettyimages-459889618-1533579787.jpg + stopErr := cm.engageStopProcedure(stopComplete) + if stopErr != nil { + if err != nil { + // Utilerrors.Aggregate allows to use errors.Is for all contained errors + // whereas fmt.Errorf allows wrapping at most one error which means the + // other one can not be found anymore. + err = kerrors.NewAggregate([]error{err, stopErr}) + } else { + err = stopErr + } + } + }() + + // Add the cluster runnable. + if err := cm.add(cm.cluster); err != nil { + return fmt.Errorf("failed to add cluster to runnables: %w", err) + } // Metrics should be served whether the controller is leader or not. // (If we don't serve metrics for non-leaders, prometheus will still scrape - // the pod but will get a connection refused) - if cm.metricsListener != nil { - go cm.serveMetrics(cm.internalStop) + // the pod but will get a connection refused). + if cm.metricsServer != nil { + // Note: We are adding the metrics server directly to HTTPServers here as matching on the + // metricsserver.Server interface in cm.runnables.Add would be very brittle. + if err := cm.runnables.HTTPServers.Add(cm.metricsServer, nil); err != nil { + return fmt.Errorf("failed to add metrics server: %w", err) + } } - go cm.startNonLeaderElectionRunnables() + // Serve health probes. + if cm.healthProbeListener != nil { + if err := cm.addHealthProbeServer(); err != nil { + return fmt.Errorf("failed to add health probe server: %w", err) + } + } - if cm.resourceLock != nil { - err := cm.startLeaderElection() - if err != nil { - return err + // Add pprof server + if cm.pprofListener != nil { + if err := cm.addPprofServer(); err != nil { + return fmt.Errorf("failed to add pprof server: %w", err) + } + } + + // First start any HTTP servers, which includes health probes, metrics and profiling if enabled. 
+ // + // WARNING: HTTPServers includes the health probes, which MUST start before any cache is populated, otherwise + // it would block conversion webhooks to be ready for serving which make the cache never get ready. + logCtx := logr.NewContext(cm.internalCtx, cm.logger) + if err := cm.runnables.HTTPServers.Start(logCtx); err != nil { + return fmt.Errorf("failed to start HTTP servers: %w", err) + } + + // Start any webhook servers, which includes conversion, validation, and defaulting + // webhooks that are registered. + // + // WARNING: Webhooks MUST start before any cache is populated, otherwise there is a race condition + // between conversion webhooks and the cache sync (usually initial list) which causes the webhooks + // to never start because no cache can be populated. + if err := cm.runnables.Webhooks.Start(cm.internalCtx); err != nil { + return fmt.Errorf("failed to start webhooks: %w", err) + } + + // Start and wait for caches. + if err := cm.runnables.Caches.Start(cm.internalCtx); err != nil { + return fmt.Errorf("failed to start caches: %w", err) + } + + // Start the non-leaderelection Runnables after the cache has synced. + if err := cm.runnables.Others.Start(cm.internalCtx); err != nil { + return fmt.Errorf("failed to start other runnables: %w", err) + } + + // Start WarmupRunnables and wait for warmup to complete. + if err := cm.runnables.Warmup.Start(cm.internalCtx); err != nil { + return fmt.Errorf("failed to start warmup runnables: %w", err) + } + + // Start the leader election and all required runnables. 
// engageStopProcedure signals all runnables to stop, reads potential errors
// from the errChan and waits for them to end. It must not be called more than
// once; a second call is rejected via the stopProcedureEngaged CAS below.
//
// Returns nil on a clean (or best-effort, when no grace period is configured)
// shutdown, or an error if the grace period elapsed before all runnables ended.
func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) error {
    // Guard against concurrent/repeated invocation: only the first caller
    // may run the stop sequence.
    if !atomic.CompareAndSwapInt64(cm.stopProcedureEngaged, 0, 1) {
        return errors.New("stop procedure already engaged")
    }

    // Populate the shutdown context. This operation MUST be done before
    // closing the internalProceduresStop channel, because the drain goroutine
    // below and the deferred cleanups all read cm.shutdownCtx.
    //
    // The shutdown context immediately expires if the gracefulShutdownTimeout is not set.
    var shutdownCancel context.CancelFunc
    if cm.gracefulShutdownTimeout < 0 {
        // A negative timeout means: wait forever for the runnables to stop.
        cm.shutdownCtx, shutdownCancel = context.WithCancel(context.Background())
    } else {
        cm.shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout)
    }
    defer shutdownCancel()

    // Start draining the errors before acquiring the lock to make sure we don't deadlock
    // if something that has the lock is blocked on trying to write into the unbuffered
    // channel after something else already wrote into it.
    var closeOnce sync.Once
    go func() {
        for {
            // Closing in the for loop is required to avoid race conditions between
            // the closure of all internal procedures and making sure to have a reader off the error channel.
            closeOnce.Do(func() {
                // Cancel the internal stop channel and wait for the procedures to stop and complete.
                close(cm.internalProceduresStop)
                cm.internalCancel()
            })
            select {
            case err := <-cm.errChan:
                // context.Canceled errors are the expected result of the stop
                // itself; anything else is surfaced to the log.
                if !errors.Is(err, context.Canceled) {
                    cm.logger.Error(err, "error received after stop sequence was engaged")
                }
            case <-stopComplete:
                return
            }
        }
    }()

    // We want to close this after the other runnables stop, because we don't
    // want things like leader election to try and emit events on a closed
    // channel
    defer cm.recorderProvider.Stop(cm.shutdownCtx)
    defer func() {
        // Cancel leader election only after we waited. It will os.Exit() the app for safety.
        if cm.resourceLock != nil {
            // After asking the context to be cancelled, make sure
            // we wait for the leader stopped channel to be closed, otherwise
            // we might encounter race conditions between this code
            // and the event recorder, which is used within leader election code.
            cm.leaderElectionCancel()
            <-cm.leaderElectionStopped
        }
    }()

    go func() {
        go func() {
            // Stop the warmup runnables in a separate goroutine to avoid blocking.
            // It is important to stop the warmup runnables in parallel with the other runnables
            // since we cannot assume ordering of whether or not one of the warmup runnables or one
            // of the other runnables is holding a lock.
            // Cancelling the wrong runnable (one that is not holding the lock) will cause the
            // shutdown sequence to block indefinitely as it will wait for the runnable that is
            // holding the lock to finish.
            cm.logger.Info("Stopping and waiting for warmup runnables")
            cm.runnables.Warmup.StopAndWait(cm.shutdownCtx)
        }()

        // First stop the non-leader election runnables.
        cm.logger.Info("Stopping and waiting for non leader election runnables")
        cm.runnables.Others.StopAndWait(cm.shutdownCtx)

        // Stop all the leader election runnables, which includes reconcilers.
        cm.logger.Info("Stopping and waiting for leader election runnables")
        // Prevent leader election when shutting down a non-elected manager
        cm.runnables.LeaderElection.startOnce.Do(func() {})
        cm.runnables.LeaderElection.StopAndWait(cm.shutdownCtx)

        // Stop the caches before the leader election runnables, this is an important
        // step to make sure that we don't race with the reconcilers by receiving more events
        // from the API servers and enqueueing them.
        cm.logger.Info("Stopping and waiting for caches")
        cm.runnables.Caches.StopAndWait(cm.shutdownCtx)

        // Webhooks and internal HTTP servers should come last, as they might be still serving some requests.
        cm.logger.Info("Stopping and waiting for webhooks")
        cm.runnables.Webhooks.StopAndWait(cm.shutdownCtx)

        cm.logger.Info("Stopping and waiting for HTTP servers")
        cm.runnables.HTTPServers.StopAndWait(cm.shutdownCtx)

        // Proceed to close the manager and overall shutdown context.
        cm.logger.Info("Wait completed, proceeding to shutdown the manager")
        shutdownCancel()
    }()

    <-cm.shutdownCtx.Done()
    if err := cm.shutdownCtx.Err(); err != nil && !errors.Is(err, context.Canceled) {
        if errors.Is(err, context.DeadlineExceeded) {
            if cm.gracefulShutdownTimeout > 0 {
                return fmt.Errorf("failed waiting for all runnables to end within grace period of %s: %w", cm.gracefulShutdownTimeout, err)
            }
            // gracefulShutdownTimeout == 0 means "don't wait", so an expired
            // deadline is expected and not an error.
            return nil
        }
        // For any other error, return the error.
        return err
    }
    return nil
}
+ return err } - cm.started = true + return nil } -func (cm *controllerManager) startLeaderElection() (err error) { - l, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{ +func (cm *controllerManager) initLeaderElector() (*leaderelection.LeaderElector, error) { + leaderElector, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{ Lock: cm.resourceLock, LeaseDuration: cm.leaseDuration, RenewDeadline: cm.renewDeadline, RetryPeriod: cm.retryPeriod, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(_ context.Context) { - cm.startLeaderElectionRunnables() + if err := cm.startLeaderElectionRunnables(); err != nil { + cm.errChan <- err + return + } + close(cm.elected) }, OnStoppedLeading: func() { + if cm.onStoppedLeading != nil { + cm.onStoppedLeading() + } + // Make sure graceful shutdown is skipped if we lost the leader lock without + // intending to. + cm.gracefulShutdownTimeout = time.Duration(0) // Most implementations of leader election log.Fatal() here. // Since Start is wrapped in log.Fatal when called, we can just return // an error here which will cause the program to exit. 
// Driver is a test type (v1, the storage version in the integration tests).
type Driver struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`
}

// DriverList is a list of Drivers.
type DriverList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []Driver `json:"items"`
}

func init() {
    // Register both types with the package scheme builder so AddToScheme
    // can install them into a runtime.Scheme.
    SchemeBuilder.Register(&Driver{}, &DriverList{})
}

// DeepCopyInto deep copies into the given Driver.
// Note: hand-written equivalent of the deepcopy-gen output.
func (d *Driver) DeepCopyInto(out *Driver) {
    *out = *d
    out.TypeMeta = d.TypeMeta
    d.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
}

// DeepCopy returns a copy of Driver.
func (d *Driver) DeepCopy() *Driver {
    if d == nil {
        return nil
    }
    out := new(Driver)
    d.DeepCopyInto(out)
    return out
}

// DeepCopyObject returns a copy of Driver as runtime.Object.
func (d *Driver) DeepCopyObject() runtime.Object {
    return d.DeepCopy()
}

// DeepCopyInto deep copies into the given DriverList.
func (in *DriverList) DeepCopyInto(out *DriverList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        // Allocate a fresh backing array so the copy does not alias the
        // original Items slice.
        in, out := &in.Items, &out.Items
        *out = make([]Driver, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy returns a copy of DriverList.
func (in *DriverList) DeepCopy() *DriverList {
    if in == nil {
        return nil
    }
    out := new(DriverList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject returns a copy of DriverList as runtime.Object.
func (in *DriverList) DeepCopyObject() runtime.Object {
    return in.DeepCopy()
}

// Hub marks Driver as a Hub for conversion; other versions (v2) convert
// to/from this type.
func (*Driver) Hub() {}
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "crew.example.com", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/pkg/manager/internal/integration/api/v2/driver_types.go b/pkg/manager/internal/integration/api/v2/driver_types.go new file mode 100644 index 0000000000..64012ac749 --- /dev/null +++ b/pkg/manager/internal/integration/api/v2/driver_types.go @@ -0,0 +1,111 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "sigs.k8s.io/controller-runtime/pkg/conversion" + v1 "sigs.k8s.io/controller-runtime/pkg/manager/internal/integration/api/v1" +) + +// Driver is a test type. +type Driver struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` +} + +// DriverList is a list of Drivers. 
// Driver is a test type (v2, the served-but-not-stored version; conversion
// to/from the v1 hub happens through ConvertTo/ConvertFrom below).
type Driver struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`
}

// DriverList is a list of Drivers.
type DriverList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []Driver `json:"items"`
}

func init() {
    // Register both types with the package scheme builder so AddToScheme
    // can install them into a runtime.Scheme.
    SchemeBuilder.Register(&Driver{}, &DriverList{})
}

// DeepCopyInto deep copies into the given Driver.
// Note: hand-written equivalent of the deepcopy-gen output.
func (d *Driver) DeepCopyInto(out *Driver) {
    *out = *d
    out.TypeMeta = d.TypeMeta
    d.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
}

// DeepCopy returns a copy of Driver.
func (d *Driver) DeepCopy() *Driver {
    if d == nil {
        return nil
    }
    out := new(Driver)
    d.DeepCopyInto(out)
    return out
}

// DeepCopyObject returns a copy of Driver as runtime.Object.
func (d *Driver) DeepCopyObject() runtime.Object {
    return d.DeepCopy()
}

// DeepCopyInto deep copies into the given DriverList.
func (in *DriverList) DeepCopyInto(out *DriverList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ListMeta.DeepCopyInto(&out.ListMeta)
    if in.Items != nil {
        // Allocate a fresh backing array so the copy does not alias the
        // original Items slice.
        in, out := &in.Items, &out.Items
        *out = make([]Driver, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy returns a copy of DriverList.
func (in *DriverList) DeepCopy() *DriverList {
    if in == nil {
        return nil
    }
    out := new(DriverList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject returns a copy of DriverList as runtime.Object.
func (in *DriverList) DeepCopyObject() runtime.Object {
    return in.DeepCopy()
}

// ConvertTo converts Driver to the Hub version (v1) of driver.
// Only Name/Namespace/UID are carried over — deliberately minimal for the
// test; presumably sufficient because the test only checks identity fields.
func (d *Driver) ConvertTo(dstRaw conversion.Hub) error {
    dst := dstRaw.(*v1.Driver)
    dst.Name = d.Name
    dst.Namespace = d.Namespace
    dst.UID = d.UID
    return nil
}

// ConvertFrom converts Driver from the Hub version (v1) of driver.
// See ConvertTo for the (intentionally minimal) field set.
func (d *Driver) ConvertFrom(srcRaw conversion.Hub) error {
    src := srcRaw.(*v1.Driver)
    d.Name = src.Name
    d.Namespace = src.Namespace
    d.UID = src.UID
    return nil
}

var (
    // GroupVersion is the group version used to register these objects
    // (crew.example.com/v2, the spoke version in the integration tests).
    GroupVersion = schema.GroupVersion{Group: "crew.example.com", Version: "v2"}

    // SchemeBuilder is used to add go types to the GroupVersionKind scheme.
    SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

    // AddToScheme adds the types in this group-version to the given scheme.
    AddToScheme = SchemeBuilder.AddToScheme
)
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestManager(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Manager Integration Suite") +} diff --git a/pkg/manager/internal/integration/manager_test.go b/pkg/manager/internal/integration/manager_test.go new file mode 100644 index 0000000000..c83eead3c1 --- /dev/null +++ b/pkg/manager/internal/integration/manager_test.go @@ -0,0 +1,306 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "context" + "fmt" + "net" + "net/http" + "reflect" + "sync/atomic" + "time" + "unsafe" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/ptr" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager" + crewv1 "sigs.k8s.io/controller-runtime/pkg/manager/internal/integration/api/v1" + crewv2 "sigs.k8s.io/controller-runtime/pkg/manager/internal/integration/api/v2" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" +) + +var ( + scheme = runtime.NewScheme() + + driverCRD = &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "drivers.crew.example.com", + }, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: crewv1.GroupVersion.Group, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "drivers", + Singular: "driver", + Kind: "Driver", + }, + Scope: apiextensionsv1.NamespaceScoped, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: crewv1.GroupVersion.Version, + Served: true, + // v1 will be the storage version. + // Reconciler and index will use v2 so we can validate the conversion webhook works. 
+ Storage: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + { + Name: crewv2.GroupVersion.Version, + Served: true, + Storage: false, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + }, + } + + ctx = ctrl.SetupSignalHandler() +) + +var _ = Describe("manger.Manager Start", func() { + // This test ensure the Manager starts without running into any deadlocks as it can be very tricky + // to start health probes, webhooks, caches (including informers) and reconcilers in the right order. + // + // To verify this we set up a test environment in the following state: + // * Ensure Informer sync requires a functioning conversion webhook (and thus readiness probe) + // * Driver CRD is deployed with v1 as storage version + // * A Driver CR is created and stored in the v1 version + // * Setup manager: + // * Set up health probes + // * Set up a Driver v2 reconciler to verify reconciliation works + // * Set up a conversion webhook which only works if readiness probe succeeds (just like via a Kubernetes service) + // * Add an index on v2 Driver to ensure we start and wait for an informer during cache.Start (as part of manager.Start) + // * Note: cache.Start would fail if the conversion webhook doesn't work (which in turn depends on the readiness probe) + // * Note: Adding the index for v2 ensures the Driver list call during Informer sync goes through conversion. + DescribeTable("should start all components without deadlock", func(warmupEnabled bool) { + // Set up schema. + Expect(clientgoscheme.AddToScheme(scheme)).To(Succeed()) + Expect(apiextensionsv1.AddToScheme(scheme)).To(Succeed()) + Expect(crewv1.AddToScheme(scheme)).To(Succeed()) + Expect(crewv2.AddToScheme(scheme)).To(Succeed()) + + // Set up test environment. 
+ env := &envtest.Environment{ + Scheme: scheme, + CRDInstallOptions: envtest.CRDInstallOptions{ + CRDs: []*apiextensionsv1.CustomResourceDefinition{driverCRD}, + }, + } + // Note: The test env configures a conversion webhook on driverCRD during Start. + cfg, err := env.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + defer func() { + Expect(env.Stop()).To(Succeed()) + }() + c, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + // Create driver CR (which is stored as v1). + driverV1 := &unstructured.Unstructured{} + driverV1.SetGroupVersionKind(crewv1.GroupVersion.WithKind("Driver")) + driverV1.SetName("driver1") + driverV1.SetNamespace(metav1.NamespaceDefault) + Expect(c.Create(ctx, driverV1)).To(Succeed()) + + // Set up Manager. + ctrl.SetLogger(zap.New()) + mgr, err := manager.New(env.Config, manager.Options{ + Scheme: scheme, + HealthProbeBindAddress: ":0", + // Disable metrics to avoid port conflicts. + Metrics: metricsserver.Options{BindAddress: "0"}, + WebhookServer: webhook.NewServer(webhook.Options{ + Port: env.WebhookInstallOptions.LocalServingPort, + Host: env.WebhookInstallOptions.LocalServingHost, + CertDir: env.WebhookInstallOptions.LocalServingCertDir, + }), + }) + Expect(err).NotTo(HaveOccurred()) + + // Configure health probes. + Expect(mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker())).To(Succeed()) + Expect(mgr.AddHealthzCheck("webhook", mgr.GetWebhookServer().StartedChecker())).To(Succeed()) + + // Set up Driver reconciler (using v2). + driverReconciler := &DriverReconciler{ + Client: mgr.GetClient(), + } + Expect( + ctrl.NewControllerManagedBy(mgr). + For(&crewv2.Driver{}). + Named(fmt.Sprintf("driver_warmup_%t", warmupEnabled)). + WithOptions(controller.Options{EnableWarmup: ptr.To(warmupEnabled)}). + Complete(driverReconciler), + ).To(Succeed()) + + // Set up a conversion webhook. 
+ conversionWebhook := createConversionWebhook(mgr) + mgr.GetWebhookServer().Register("/convert", conversionWebhook) + + // Add an index on Driver (using v2). + // Note: This triggers the creation of an Informer for Driver v2. + Expect(mgr.GetCache().IndexField(ctx, &crewv2.Driver{}, "name", func(object client.Object) []string { + return []string{object.GetName()} + })).To(Succeed()) + + // Start the Manager. + ctx, cancel := context.WithCancel(ctx) + go func() { + defer GinkgoRecover() + Expect(mgr.Start(ctx)).To(Succeed()) + }() + + // Verify manager.Start successfully started health probes, webhooks, caches (including informers) and reconcilers. + // Notes: + // * The cache will only start successfully if the informer for v2 Driver is synced. + // * The informer for v2 Driver will only sync if a list on v2 Driver succeeds (which requires a working conversion webhook) + select { + case <-time.After(30 * time.Second): + // Don't wait forever if the manager doesn't come up. + Fail("Manager didn't start in time") + case <-mgr.Elected(): + } + + // Verify the reconciler reconciles. + Eventually(func(g Gomega) { + g.Expect(atomic.LoadUint64(&driverReconciler.ReconcileCount)).Should(BeNumerically(">", 0)) + }, 10*time.Second).Should(Succeed()) + + // Verify conversion webhook was called. + Expect(atomic.LoadUint64(&conversionWebhook.ConversionCount)).Should(BeNumerically(">", 0)) + + // Verify the conversion webhook works by getting the Driver as v1 and v2. 
+ Expect(c.Get(ctx, client.ObjectKeyFromObject(driverV1), driverV1)).To(Succeed()) + driverV2 := &unstructured.Unstructured{} + driverV2.SetGroupVersionKind(crewv2.GroupVersion.WithKind("Driver")) + driverV2.SetName("driver1") + driverV2.SetNamespace(metav1.NamespaceDefault) + Expect(c.Get(ctx, client.ObjectKeyFromObject(driverV2), driverV2)).To(Succeed()) + + // Shutdown the server + cancel() + }, + Entry("controller warmup enabled", true), + Entry("controller warmup not enabled", false), + ) +}) + +type DriverReconciler struct { + Client client.Client + ReconcileCount uint64 +} + +func (r *DriverReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling") + + // Fetch the Driver instance. + cluster := &crewv2.Driver{} + if err := r.Client.Get(ctx, req.NamespacedName, cluster); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + + // Error reading the object - requeue the request. + return ctrl.Result{}, err + } + + atomic.AddUint64(&r.ReconcileCount, 1) + + return reconcile.Result{}, nil +} + +// ConversionWebhook is just a shim around the conversion handler from +// the webhook package. We use it to simulate the behavior of a conversion +// webhook in a real cluster, i.e. the conversion webhook only works after the +// controller Pod is ready (the readiness probe is up). +type ConversionWebhook struct { + httpClient http.Client + conversionHandler http.Handler + readinessEndpoint string + ConversionCount uint64 +} + +func createConversionWebhook(mgr manager.Manager) *ConversionWebhook { + conversionHandler := conversion.NewWebhookHandler(mgr.GetScheme()) + httpClient := http.Client{ + // Setting a timeout to not get stuck when calling the readiness probe. + Timeout: 5 * time.Second, + } + + // Read the unexported healthProbeListener field of the manager to get the listener address. + // This is a hack but it's better than using a hard-coded port. 
+ v := reflect.ValueOf(mgr).Elem() + field := v.FieldByName("healthProbeListener") + healthProbeListener := *(*net.Listener)(unsafe.Pointer(field.UnsafeAddr())) + readinessEndpoint := fmt.Sprint("http://", healthProbeListener.Addr().String(), "/readyz") + + return &ConversionWebhook{ + httpClient: httpClient, + conversionHandler: conversionHandler, + readinessEndpoint: readinessEndpoint, + } +} + +func (c *ConversionWebhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { + resp, err := c.httpClient.Get(c.readinessEndpoint) + if err != nil { + logf.Log.WithName("conversion-webhook").Error(err, "failed to serve conversion: readiness endpoint is not up") + w.WriteHeader(http.StatusInternalServerError) + return + } + + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + // This simulates the behavior in Kubernetes that conversion webhooks are only served after + // the controller is ready (and thus the Kubernetes service sends requests to the controller). + logf.Log.WithName("conversion-webhook").Info("failed to serve conversion: controller is not ready yet") + w.WriteHeader(http.StatusInternalServerError) + return + } + + atomic.AddUint64(&c.ConversionCount, 1) + c.conversionHandler.ServeHTTP(w, r) +} diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 670df79d08..e0e94245e7 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -17,24 +17,32 @@ limitations under the License. 
// Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables.
// A Manager is required to create Controllers.
type Manager interface {
    // Cluster holds a variety of methods to interact with a cluster.
    cluster.Cluster

    // Add will set requested dependencies on the component, and cause the component to be
    // started when Start is called.
    // Depending on if a Runnable implements LeaderElectionRunnable interface, a Runnable can be run in either
    // non-leaderelection mode (always running) or leader election mode (managed by leader election if enabled).
    Add(Runnable) error

    // Elected is closed when this manager is elected leader of a group of
    // managers, either because it won a leader election or because no leader
    // election was configured.
    Elected() <-chan struct{}

    // AddMetricsServerExtraHandler adds an extra handler served on path to the http server that serves metrics.
    // Might be useful to register some diagnostic endpoints e.g. pprof.
    //
    // Note that these endpoints are meant to be sensitive and shouldn't be exposed publicly.
    //
    // If the simple path -> handler mapping offered here is not enough,
    // a new http server/listener should be added as Runnable to the manager via Add method.
    AddMetricsServerExtraHandler(path string, handler http.Handler) error

    // AddHealthzCheck allows you to add Healthz checker
    AddHealthzCheck(name string, check healthz.Checker) error

    // AddReadyzCheck allows you to add Readyz checker
    AddReadyzCheck(name string, check healthz.Checker) error

    // Start starts all registered Controllers and blocks until the context is cancelled.
    // Returns an error if there is an error starting any controller.
    //
    // If LeaderElection is used, the binary must be exited immediately after this returns,
    // otherwise components that need leader election might continue to run after the leader
    // lock was lost.
    Start(ctx context.Context) error

    // GetWebhookServer returns a webhook.Server
    GetWebhookServer() webhook.Server

    // GetLogger returns this manager's logger.
    GetLogger() logr.Logger

    // GetControllerOptions returns controller global configuration options.
    GetControllerOptions() config.Controller
}
+ // + // If set, the RESTMapper returned by this function is used to create the RESTMapper + // used by the Client and Cache. + MapperProvider func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) + + // Cache is the cache.Options that will be used to create the default Cache. + // By default, the cache will watch and list requested objects in all namespaces. + Cache cache.Options + + // NewCache is the function that will create the cache to be used + // by the manager. If not set this will use the default new cache function. + // + // When using a custom NewCache, the Cache options will be passed to the + // NewCache function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewCache if you know what you are doing. + NewCache cache.NewCacheFunc + + // Client is the client.Options that will be used to create the default Client. + // By default, the client will use the cache for reads and direct calls for writes. + Client client.Options + + // NewClient is the func that creates the client to be used by the manager. + // If not set this will create a Client backed by a Cache for read operations + // and a direct Client for write operations. + // + // When using a custom NewClient, the Client options will be passed to the + // NewClient function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewClient if you know what you are doing. + NewClient client.NewClientFunc - // SyncPeriod determines the minimum frequency at which watched resources are - // reconciled. A lower period will correct entropy more quickly, but reduce - // responsiveness to change if there are many watched resources. Change this - // value only if you know what you are doing. Defaults to 10 hours if unset. - SyncPeriod *time.Duration + // Logger is the logger that should be used by this manager. + // If none is set, it defaults to log.Log global logger. 
+ Logger logr.Logger // LeaderElection determines whether or not to use leader election when // starting the manager. LeaderElection bool + // LeaderElectionResourceLock determines which resource lock to use for leader election, + // defaults to "leases". Change this value only if you know what you are doing. + // + // If you are using `configmaps`/`endpoints` resource lock and want to migrate to "leases", + // you might do so by migrating to the respective multilock first ("configmapsleases" or "endpointsleases"), + // which will acquire a leader lock on both resources. + // After all your users have migrated to the multilock, you can go ahead and migrate to "leases". + // Please also keep in mind, that users might skip versions of your controller. + // + // Note: before controller-runtime version v0.7, it was set to "configmaps". + // And from v0.7 to v0.11, the default was "configmapsleases", which was + // used to migrate from configmaps to leases. + // Since the default was "configmapsleases" for over a year, spanning five minor releases, + // any actively maintained operators are very likely to have a released version that uses + // "configmapsleases". Therefore defaulting to "leases" should be safe since v0.12. + // + // So, what do you have to do when you are updating your controller-runtime dependency + // from a lower version to v0.12 or newer? + // - If your operator matches at least one of these conditions: + // - the LeaderElectionResourceLock in your operator has already been explicitly set to "leases" + // - the old controller-runtime version is between v0.7.0 and v0.11.x and the + // LeaderElectionResourceLock wasn't set or was set to "leases"/"configmapsleases"/"endpointsleases" + // feel free to update controller-runtime to v0.12 or newer. + // - Otherwise, you may have to take these steps: + // 1. update controller-runtime to v0.12 or newer in your go.mod + // 2. set LeaderElectionResourceLock to "configmapsleases" (or "endpointsleases") + // 3. 
package your operator and upgrade it in all your clusters + // 4. only if you have finished 3, you can remove the LeaderElectionResourceLock to use the default "leases" + // Otherwise, your operator might end up with multiple running instances that + // each acquired leadership through different resource locks during upgrades and thus + // act on the same resources concurrently. + LeaderElectionResourceLock string + // LeaderElectionNamespace determines the namespace in which the leader - // election configmap will be created. + // election resource will be created. LeaderElectionNamespace string - // LeaderElectionID determines the name of the configmap that leader election + // LeaderElectionID determines the name of the resource that leader election // will use for holding the leader lock. LeaderElectionID string + // LeaderElectionConfig can be specified to override the default configuration + // that is used to build the leader election client. + LeaderElectionConfig *rest.Config + + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader doesn't have to wait + // LeaseDuration time first. + LeaderElectionReleaseOnCancel bool + + // LeaderElectionLabels allows a controller to supplement all leader election api calls with a set of custom labels based on + // the replica attempting to acquire leader status. + LeaderElectionLabels map[string]string + + // LeaderElectionResourceLockInterface allows to provide a custom resourcelock.Interface that was created outside + // of the controller-runtime. If this value is set the options LeaderElectionID, LeaderElectionNamespace, + // LeaderElectionResourceLock, LeaseDuration, RenewDeadline, RetryPeriod and LeaderElectionLeases will be ignored. 
+ // This can be useful if you want to use a locking mechanism that is currently not supported, like a MultiLock across + // two Kubernetes clusters. + LeaderElectionResourceLockInterface resourcelock.Interface + // LeaseDuration is the duration that non-leader candidates will // wait to force acquire leadership. This is measured against time of // last observed ack. Default is 15 seconds. LeaseDuration *time.Duration - // RenewDeadline is the duration that the acting master will retry + + // RenewDeadline is the duration that the acting controlplane will retry // refreshing leadership before giving up. Default is 10 seconds. RenewDeadline *time.Duration + // RetryPeriod is the duration the LeaderElector clients should wait // between tries of actions. Default is 2 seconds. RetryPeriod *time.Duration - // Namespace if specified restricts the manager's cache to watch objects in - // the desired namespace Defaults to all namespaces - // - // Note: If a namespace is specified, controllers can still Watch for a - // cluster-scoped resource (e.g Node). For namespaced resources the cache - // will only hold objects from the desired namespace. - Namespace string + // Metrics are the metricsserver.Options that will be used to create the metricsserver.Server. + Metrics metricsserver.Options - // MetricsBindAddress is the TCP address that the controller should bind to - // for serving prometheus metrics - MetricsBindAddress string + // HealthProbeBindAddress is the TCP address that the controller should bind to + // for serving health probes + // It can be set to "0" or "" to disable serving the health probe. + HealthProbeBindAddress string - // Port is the port that the webhook server serves at. - // It is used to set webhook.Server.Port. - Port int - // Host is the hostname that the webhook server binds to. - // It is used to set webhook.Server.Host. 
- Host string + // Readiness probe endpoint name, defaults to "readyz" + ReadinessEndpointName string - // Functions to all for a user to customize the values that will be injected. + // Liveness probe endpoint name, defaults to "healthz" + LivenessEndpointName string - // NewCache is the function that will create the cache to be used - // by the manager. If not set this will use the default new cache function. - NewCache cache.NewCacheFunc + // PprofBindAddress is the TCP address that the controller should bind to + // for serving pprof. + // It can be set to "" or "0" to disable the pprof serving. + // Since pprof may contain sensitive information, make sure to protect it + // before exposing it to public. + PprofBindAddress string + + // WebhookServer is an externally configured webhook.Server. By default, + // a Manager will create a server via webhook.NewServer with default settings. + // If this is set, the Manager will use this server instead. + WebhookServer webhook.Server - // NewClient will create the client to be used by the manager. - // If not set this will create the default DelegatingClient that will - // use the cache for reads and the client for writes. - NewClient NewClientFunc + // BaseContext is the function that provides Context values to Runnables + // managed by the Manager. If a BaseContext function isn't provided, Runnables + // will receive a new Background Context instead. + BaseContext BaseContextFunc + + // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API + // Use this to customize the event correlator and spam filter + // + // Deprecated: using this may cause goroutine leaks if the lifetime of your manager or controllers + // is shorter than the lifetime of your process. + EventBroadcaster record.EventBroadcaster + + // GracefulShutdownTimeout is the duration given to runnable to stop before the manager actually returns on stop. 
+ // To disable graceful shutdown, set to time.Duration(0) + // To use graceful shutdown without timeout, set to a negative duration, e.G. time.Duration(-1) + // The graceful shutdown is skipped for safety reasons in case the leader election lease is lost. + GracefulShutdownTimeout *time.Duration + + // Controller contains global configuration options for controllers + // registered within this manager. + // +optional + Controller config.Controller + + // makeBroadcaster allows deferring the creation of the broadcaster to + // avoid leaking goroutines if we never call Start on this manager. It also + // returns whether or not this is a "owned" broadcaster, and as such should be + // stopped with the manager. + makeBroadcaster intrec.EventBroadcasterProducer // Dependency injection for testing - newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger) (recorder.Provider, error) - newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) - newMetricsListener func(addr string) (net.Listener, error) + newRecorderProvider func(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) + newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) + newMetricsServer func(options metricsserver.Options, config *rest.Config, httpClient *http.Client) (metricsserver.Server, error) + newHealthProbeListener func(addr string) (net.Listener, error) + newPprofListener func(addr string) (net.Listener, error) } -// NewClientFunc allows a user to define how to create a client -type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) +// BaseContextFunc is a function used to provide a base Context to Runnables +// managed by a 
Manager. +type BaseContextFunc func() context.Context // Runnable allows a component to be started. // It's very important that Start blocks until // it's done running. type Runnable interface { // Start starts running the component. The component will stop running - // when the channel is closed. Start blocks until the channel is closed or + // when the context is closed. Start blocks until the context is closed or // an error occurs. - Start(<-chan struct{}) error + Start(context.Context) error } // RunnableFunc implements Runnable using a function. // It's very important that the given function block // until it's done running. -type RunnableFunc func(<-chan struct{}) error +type RunnableFunc func(context.Context) error -// Start implements Runnable -func (r RunnableFunc) Start(s <-chan struct{}) error { - return r(s) +// Start implements Runnable. +func (r RunnableFunc) Start(ctx context.Context) error { + return r(ctx) } // LeaderElectionRunnable knows if a Runnable needs to be run in the leader election mode. @@ -192,138 +319,207 @@ type LeaderElectionRunnable interface { NeedLeaderElection() bool } +// warmupRunnable knows if a Runnable requires warmup. A warmup runnable is a runnable +// that should be run when the manager is started but before it becomes leader. +// Note: Implementing this interface is only useful when LeaderElection can be enabled, as the +// behavior when leaderelection is not enabled is to run LeaderElectionRunnables immediately. +type warmupRunnable interface { + // Warmup will be called when the manager is started but before it becomes leader. + Warmup(context.Context) error +} + // New returns a new Manager for creating Controllers. +// Note that if ContentType in the given config is not set, "application/vnd.kubernetes.protobuf" +// will be used for all built-in resources of Kubernetes, and "application/json" is for other types +// including all CRD resources. 
func New(config *rest.Config, options Options) (Manager, error) { - // Initialize a rest.config if none was specified if config == nil { - return nil, fmt.Errorf("must specify Config") + return nil, errors.New("must specify Config") } - // Set default values for options fields options = setOptionsDefaults(options) - // Create the mapper provider - mapper, err := options.MapperProvider(config) - if err != nil { - log.Error(err, "Failed to get API Group-Resources") - return nil, err - } - - // Create the cache for the cached read client and registering informers - cache, err := options.NewCache(config, cache.Options{Scheme: options.Scheme, Mapper: mapper, Resync: options.SyncPeriod, Namespace: options.Namespace}) + cluster, err := cluster.New(config, func(clusterOptions *cluster.Options) { + clusterOptions.Scheme = options.Scheme + clusterOptions.MapperProvider = options.MapperProvider + clusterOptions.Logger = options.Logger + clusterOptions.NewCache = options.NewCache + clusterOptions.NewClient = options.NewClient + clusterOptions.Cache = options.Cache + clusterOptions.Client = options.Client + clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck + }) if err != nil { return nil, err } - apiReader, err := client.New(config, client.Options{Scheme: options.Scheme, Mapper: mapper}) - if err != nil { - return nil, err + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() } - writeObj, err := options.NewClient(cache, config, client.Options{Scheme: options.Scheme, Mapper: mapper}) - if err != nil { - return nil, err - } // Create the recorder provider to inject event recorders for the components. // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific // to the particular controller that it's being injected into, rather than a generic one like is here. 
- recorderProvider, err := options.newRecorderProvider(config, options.Scheme, log.WithName("events")) + recorderProvider, err := options.newRecorderProvider(config, cluster.GetHTTPClient(), cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } // Create the resource lock to enable leader election) - resourceLock, err := options.newResourceLock(config, recorderProvider, leaderelection.Options{ - LeaderElection: options.LeaderElection, - LeaderElectionID: options.LeaderElectionID, - LeaderElectionNamespace: options.LeaderElectionNamespace, - }) + var leaderConfig *rest.Config + var leaderRecorderProvider *intrec.Provider + + if options.LeaderElectionConfig == nil { + leaderConfig = rest.CopyConfig(config) + leaderRecorderProvider = recorderProvider + } else { + leaderConfig = rest.CopyConfig(options.LeaderElectionConfig) + scheme := cluster.GetScheme() + err := corev1.AddToScheme(scheme) + if err != nil { + return nil, err + } + err = coordinationv1.AddToScheme(scheme) + if err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(options.LeaderElectionConfig) + if err != nil { + return nil, err + } + leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, httpClient, scheme, options.Logger.WithName("events"), options.makeBroadcaster) + if err != nil { + return nil, err + } + } + + var resourceLock resourcelock.Interface + if options.LeaderElectionResourceLockInterface != nil && options.LeaderElection { + resourceLock = options.LeaderElectionResourceLockInterface + } else { + resourceLock, err = options.newResourceLock(leaderConfig, leaderRecorderProvider, leaderelection.Options{ + LeaderElection: options.LeaderElection, + LeaderElectionResourceLock: options.LeaderElectionResourceLock, + LeaderElectionID: options.LeaderElectionID, + LeaderElectionNamespace: options.LeaderElectionNamespace, + RenewDeadline: *options.RenewDeadline, + LeaderLabels: 
options.LeaderElectionLabels, + }) + if err != nil { + return nil, err + } + } + + // Create the metrics server. + metricsServer, err := options.newMetricsServer(options.Metrics, config, cluster.GetHTTPClient()) if err != nil { return nil, err } - // Create the mertics listener. This will throw an error if the metrics bind + // Create health probes listener. This will throw an error if the bind // address is invalid or already in use. - metricsListener, err := options.newMetricsListener(options.MetricsBindAddress) + healthProbeListener, err := options.newHealthProbeListener(options.HealthProbeBindAddress) if err != nil { return nil, err } - stop := make(chan struct{}) + // Create pprof listener. This will throw an error if the bind + // address is invalid or already in use. + pprofListener, err := options.newPprofListener(options.PprofBindAddress) + if err != nil { + return nil, fmt.Errorf("failed to new pprof listener: %w", err) + } + errChan := make(chan error, 1) + runnables := newRunnables(options.BaseContext, errChan).withLogger(options.Logger) return &controllerManager{ - config: config, - scheme: options.Scheme, - errChan: make(chan error), - cache: cache, - fieldIndexes: cache, - client: writeObj, - apiReader: apiReader, - recorderProvider: recorderProvider, - resourceLock: resourceLock, - mapper: mapper, - metricsListener: metricsListener, - internalStop: stop, - internalStopper: stop, - port: options.Port, - host: options.Host, - leaseDuration: *options.LeaseDuration, - renewDeadline: *options.RenewDeadline, - retryPeriod: *options.RetryPeriod, + stopProcedureEngaged: ptr.To(int64(0)), + cluster: cluster, + runnables: runnables, + errChan: errChan, + recorderProvider: recorderProvider, + resourceLock: resourceLock, + metricsServer: metricsServer, + controllerConfig: options.Controller, + logger: options.Logger, + elected: make(chan struct{}), + webhookServer: options.WebhookServer, + leaderElectionID: options.LeaderElectionID, + leaseDuration: 
*options.LeaseDuration, + renewDeadline: *options.RenewDeadline, + retryPeriod: *options.RetryPeriod, + healthProbeListener: healthProbeListener, + readinessEndpointName: options.ReadinessEndpointName, + livenessEndpointName: options.LivenessEndpointName, + pprofListener: pprofListener, + gracefulShutdownTimeout: *options.GracefulShutdownTimeout, + internalProceduresStop: make(chan struct{}), + leaderElectionStopped: make(chan struct{}), + leaderElectionReleaseOnCancel: options.LeaderElectionReleaseOnCancel, }, nil } -// defaultNewClient creates the default caching client -func defaultNewClient(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) { - // Create the Client for Write operations. - c, err := client.New(config, options) - if err != nil { - return nil, err +// defaultHealthProbeListener creates the default health probes listener bound to the given address. +func defaultHealthProbeListener(addr string) (net.Listener, error) { + if addr == "" || addr == "0" { + return nil, nil } - return &client.DelegatingClient{ - Reader: &client.DelegatingReader{ - CacheReader: cache, - ClientReader: c, - }, - Writer: c, - StatusClient: c, - }, nil + ln, err := net.Listen("tcp", addr) + if err != nil { + return nil, fmt.Errorf("error listening on %s: %w", addr, err) + } + return ln, nil } -// setOptionsDefaults set default values for Options fields -func setOptionsDefaults(options Options) Options { - // Use the Kubernetes client-go scheme if none is specified - if options.Scheme == nil { - options.Scheme = scheme.Scheme +// defaultPprofListener creates the default pprof listener bound to the given address. 
+func defaultPprofListener(addr string) (net.Listener, error) { + if addr == "" || addr == "0" { + return nil, nil } - if options.MapperProvider == nil { - options.MapperProvider = apiutil.NewDiscoveryRESTMapper + ln, err := net.Listen("tcp", addr) + if err != nil { + return nil, fmt.Errorf("error listening on %s: %w", addr, err) } + return ln, nil +} - // Allow newClient to be mocked - if options.NewClient == nil { - options.NewClient = defaultNewClient - } +// defaultBaseContext is used as the BaseContext value in Options if one +// has not already been set. +func defaultBaseContext() context.Context { + return context.Background() +} - // Allow newCache to be mocked - if options.NewCache == nil { - options.NewCache = cache.New +// setOptionsDefaults set default values for Options fields. +func setOptionsDefaults(options Options) Options { + // Allow newResourceLock to be mocked + if options.newResourceLock == nil { + options.newResourceLock = leaderelection.NewResourceLock } // Allow newRecorderProvider to be mocked if options.newRecorderProvider == nil { - options.newRecorderProvider = internalrecorder.NewProvider + options.newRecorderProvider = intrec.NewProvider } - // Allow newResourceLock to be mocked - if options.newResourceLock == nil { - options.newResourceLock = leaderelection.NewResourceLock + // This is duplicated with pkg/cluster, we need it here + // for the leader election and there to provide the user with + // an EventBroadcaster + if options.EventBroadcaster == nil { + // defer initialization to avoid leaking by default + options.makeBroadcaster = func() (record.EventBroadcaster, bool) { + return record.NewBroadcaster(), true + } + } else { + options.makeBroadcaster = func() (record.EventBroadcaster, bool) { + return options.EventBroadcaster, false + } } - if options.newMetricsListener == nil { - options.newMetricsListener = metrics.NewListener + if options.newMetricsServer == nil { + options.newMetricsServer = metricsserver.NewServer } 
leaseDuration, renewDeadline, retryPeriod := defaultLeaseDuration, defaultRenewDeadline, defaultRetryPeriod if options.LeaseDuration == nil { @@ -338,5 +534,42 @@ func setOptionsDefaults(options Options) Options { options.RetryPeriod = &retryPeriod } + if options.ReadinessEndpointName == "" { + options.ReadinessEndpointName = defaultReadinessEndpoint + } + + if options.LivenessEndpointName == "" { + options.LivenessEndpointName = defaultLivenessEndpoint + } + + if options.newHealthProbeListener == nil { + options.newHealthProbeListener = defaultHealthProbeListener + } + + if options.newPprofListener == nil { + options.newPprofListener = defaultPprofListener + } + + if options.GracefulShutdownTimeout == nil { + gracefulShutdownTimeout := defaultGracefulShutdownPeriod + options.GracefulShutdownTimeout = &gracefulShutdownTimeout + } + + if options.Logger.GetSink() == nil { + options.Logger = log.Log + } + + if options.Controller.Logger.GetSink() == nil { + options.Controller.Logger = options.Logger + } + + if options.BaseContext == nil { + options.BaseContext = defaultBaseContext + } + + if options.WebhookServer == nil { + options.WebhookServer = webhook.NewServer(webhook.Options{}) + } + return options } diff --git a/pkg/manager/manager_suite_test.go b/pkg/manager/manager_suite_test.go index 9dad53c532..7fbf7184ac 100644 --- a/pkg/manager/manager_suite_test.go +++ b/pkg/manager/manager_suite_test.go @@ -17,29 +17,34 @@ limitations under the License. package manager import ( + "fmt" + "net/http" "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/controller-runtime/pkg/metrics" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" ) func TestSource(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Controller Integration Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Manager Suite") } var testenv *envtest.Environment var cfg *rest.Config var clientset *kubernetes.Clientset -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) +// clientTransport is used to force-close keep-alives in tests that check for leaks. +var clientTransport *http.Transport + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) testenv = &envtest.Environment{} @@ -47,18 +52,29 @@ var _ = BeforeSuite(func(done Done) { cfg, err = testenv.Start() Expect(err).NotTo(HaveOccurred()) + cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + // NB(directxman12): we can't set Transport *and* use TLS options, + // so we grab the transport right after it gets created so that we can + // type-assert on it (hopefully)? 
+ // hopefully this doesn't break 🤞 + transport, isTransport := rt.(*http.Transport) + if !isTransport { + panic(fmt.Sprintf("wasn't able to grab underlying transport from REST client's RoundTripper, can't figure out how to close keep-alives: expected an *http.Transport, got %#v", rt)) + } + clientTransport = transport + return rt + } + clientset, err = kubernetes.NewForConfig(cfg) Expect(err).NotTo(HaveOccurred()) // Prevent the metrics listener being created - metrics.DefaultBindAddress = "0" - - close(done) -}, 60) + metricsserver.DefaultBindAddress = "0" +}) var _ = AfterSuite(func() { Expect(testenv.Stop()).To(Succeed()) // Put the DefaultBindAddress back - metrics.DefaultBindAddress = ":8080" + metricsserver.DefaultBindAddress = ":8080" }) diff --git a/pkg/manager/manager_test.go b/pkg/manager/manager_test.go index 4141e56593..4363d62f59 100644 --- a/pkg/manager/manager_test.go +++ b/pkg/manager/manager_test.go @@ -17,41 +17,44 @@ limitations under the License. package manager import ( + "context" + "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" + "path" + "sync" + "sync/atomic" + "time" "github.com/go-logr/logr" - . "github.com/onsi/ginkgo" + "github.com/go-logr/logr/funcr" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/prometheus/client_golang/prometheus" + "go.uber.org/goleak" + coordinationv1 "k8s.io/api/coordination/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" "k8s.io/client-go/tools/leaderelection/resourcelock" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/cache/informertest" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" "sigs.k8s.io/controller-runtime/pkg/leaderelection" fakeleaderelection "sigs.k8s.io/controller-runtime/pkg/leaderelection/fake" "sigs.k8s.io/controller-runtime/pkg/metrics" - "sigs.k8s.io/controller-runtime/pkg/reconcile" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/recorder" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/webhook" ) var _ = Describe("manger.Manager", func() { - var stop chan struct{} - - BeforeEach(func() { - stop = make(chan struct{}) - }) - - AfterEach(func() { - close(stop) - }) - Describe("New", func() { It("should return an error if there is no Config", func() { m, err := New(nil, Options{}) @@ -63,27 +66,25 @@ var _ = Describe("manger.Manager", func() { It("should return an error if it can't create a RestMapper", func() { expected := fmt.Errorf("expected error: RestMapper") m, err := New(cfg, Options{ - MapperProvider: func(c *rest.Config) (meta.RESTMapper, error) { return nil, expected }, + MapperProvider: func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { return nil, expected }, }) Expect(m).To(BeNil()) Expect(err).To(Equal(expected)) }) - It("should return an error it can't create a client.Client", func(done Done) { + It("should return an error it can't create a client.Client", func() 
{ m, err := New(cfg, Options{ - NewClient: func(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) { - return nil, fmt.Errorf("expected error") + NewClient: func(config *rest.Config, options client.Options) (client.Client, error) { + return nil, errors.New("expected error") }, }) Expect(m).To(BeNil()) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("expected error")) - - close(done) }) - It("should return an error it can't create a cache.Cache", func(done Done) { + It("should return an error it can't create a cache.Cache", func() { m, err := New(cfg, Options{ NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) { return nil, fmt.Errorf("expected error") @@ -92,66 +93,336 @@ var _ = Describe("manger.Manager", func() { Expect(m).To(BeNil()) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("expected error")) - - close(done) }) - It("should create a client defined in by the new client function", func(done Done) { + It("should create a client defined in by the new client function", func() { m, err := New(cfg, Options{ - NewClient: func(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) { + NewClient: func(config *rest.Config, options client.Options) (client.Client, error) { return nil, nil }, }) Expect(m).ToNot(BeNil()) Expect(err).ToNot(HaveOccurred()) Expect(m.GetClient()).To(BeNil()) - - close(done) }) - It("should return an error it can't create a recorder.Provider", func(done Done) { + It("should return an error it can't create a recorder.Provider", func() { m, err := New(cfg, Options{ - newRecorderProvider: func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger) (recorder.Provider, error) { + newRecorderProvider: func(_ *rest.Config, _ *http.Client, _ *runtime.Scheme, _ logr.Logger, _ intrec.EventBroadcasterProducer) (*intrec.Provider, error) { return nil, fmt.Errorf("expected error") }, }) Expect(m).To(BeNil()) 
Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("expected error")) - - close(done) }) - It("should lazily initialize a webhook server if needed", func(done Done) { + It("should lazily initialize a webhook server if needed", func() { By("creating a manager with options") - m, err := New(cfg, Options{Port: 9443, Host: "foo.com"}) + m, err := New(cfg, Options{WebhookServer: webhook.NewServer(webhook.Options{Port: 9440, Host: "foo.com"})}) Expect(err).NotTo(HaveOccurred()) Expect(m).NotTo(BeNil()) By("checking options are passed to the webhook server") svr := m.GetWebhookServer() Expect(svr).NotTo(BeNil()) - Expect(svr.Port).To(Equal(9443)) - Expect(svr.Host).To(Equal("foo.com")) + Expect(svr.(*webhook.DefaultServer).Options.Port).To(Equal(9440)) + Expect(svr.(*webhook.DefaultServer).Options.Host).To(Equal("foo.com")) + }) - close(done) + It("should not initialize a webhook server if Options.WebhookServer is set", func() { + By("creating a manager with options") + srv := webhook.NewServer(webhook.Options{Port: 9440}) + m, err := New(cfg, Options{WebhookServer: srv}) + Expect(err).NotTo(HaveOccurred()) + Expect(m).NotTo(BeNil()) + + By("checking the server contains the Port set on the webhook server and not passed to Options") + svr := m.GetWebhookServer() + Expect(svr).NotTo(BeNil()) + Expect(svr).To(Equal(srv)) + Expect(svr.(*webhook.DefaultServer).Options.Port).To(Equal(9440)) + }) + + It("should allow passing a custom webhook.Server implementation", func() { + type customWebhook struct { + webhook.Server + } + m, err := New(cfg, Options{WebhookServer: customWebhook{}}) + Expect(err).NotTo(HaveOccurred()) + Expect(m).NotTo(BeNil()) + + svr := m.GetWebhookServer() + Expect(svr).NotTo(BeNil()) + + _, isCustomWebhook := svr.(customWebhook) + Expect(isCustomWebhook).To(BeTrue()) }) Context("with leader election enabled", func() { - It("should default ID to controller-runtime if ID is not set", func() { - var rl resourcelock.Interface + It("should 
only cancel the leader election after all runnables are done", func(specCtx SpecContext) { + m, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionNamespace: "default", + LeaderElectionID: "test-leader-election-id-2", + HealthProbeBindAddress: "0", + Metrics: metricsserver.Options{BindAddress: "0"}, + PprofBindAddress: "0", + }) + Expect(err).ToNot(HaveOccurred()) + gvkcorev1 := schema.GroupVersionKind{Group: corev1.SchemeGroupVersion.Group, Version: corev1.SchemeGroupVersion.Version, Kind: "ConfigMap"} + gvkcoordinationv1 := schema.GroupVersionKind{Group: coordinationv1.SchemeGroupVersion.Group, Version: coordinationv1.SchemeGroupVersion.Version, Kind: "Lease"} + Expect(m.GetScheme().Recognizes(gvkcorev1)).To(BeTrue()) + Expect(m.GetScheme().Recognizes(gvkcoordinationv1)).To(BeTrue()) + runnableDone := make(chan struct{}) + slowRunnable := RunnableFunc(func(ctx context.Context) error { + <-ctx.Done() + time.Sleep(100 * time.Millisecond) + close(runnableDone) + return nil + }) + Expect(m.Add(slowRunnable)).To(Succeed()) + + cm := m.(*controllerManager) + cm.gracefulShutdownTimeout = time.Second + leaderElectionDone := make(chan struct{}) + cm.onStoppedLeading = func() { + close(leaderElectionDone) + } + + ctx, cancel := context.WithCancel(specCtx) + mgrDone := make(chan struct{}) + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).To(Succeed()) + close(mgrDone) + }() + <-cm.Elected() + cancel() + select { + case <-leaderElectionDone: + Expect(errors.New("leader election was cancelled before runnables were done")).ToNot(HaveOccurred()) + case <-runnableDone: + // Success + } + // Don't leak routines + <-mgrDone + + }) + It("should disable gracefulShutdown when stopping to lead", func(ctx SpecContext) { m, err := New(cfg, Options{ LeaderElection: true, LeaderElectionNamespace: "default", + LeaderElectionID: "test-leader-election-id-3", + HealthProbeBindAddress: "0", + Metrics: metricsserver.Options{BindAddress: "0"}, + PprofBindAddress: "0", + 
}) + Expect(err).ToNot(HaveOccurred()) + + mgrDone := make(chan struct{}) + go func() { + defer GinkgoRecover() + err := m.Start(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("leader election lost")) + close(mgrDone) + }() + cm := m.(*controllerManager) + <-cm.elected + + cm.leaderElectionCancel() + <-mgrDone + + Expect(cm.gracefulShutdownTimeout.Nanoseconds()).To(Equal(int64(0))) + }) + + It("should prevent leader election when shutting down a non-elected manager", func(specCtx SpecContext) { + var rl resourcelock.Interface + m1, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionNamespace: "default", + LeaderElectionID: "test-leader-election-id", newResourceLock: func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) { var err error rl, err = leaderelection.NewResourceLock(config, recorderProvider, options) return rl, err }, + HealthProbeBindAddress: "0", + Metrics: metricsserver.Options{BindAddress: "0"}, + PprofBindAddress: "0", + }) + Expect(err).ToNot(HaveOccurred()) + Expect(m1).ToNot(BeNil()) + Expect(rl.Describe()).To(Equal("default/test-leader-election-id")) + + m1cm, ok := m1.(*controllerManager) + Expect(ok).To(BeTrue()) + m1cm.onStoppedLeading = func() {} + + m2, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionNamespace: "default", + LeaderElectionID: "test-leader-election-id", + newResourceLock: func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) { + var err error + rl, err = leaderelection.NewResourceLock(config, recorderProvider, options) + return rl, err + }, + HealthProbeBindAddress: "0", + Metrics: metricsserver.Options{BindAddress: "0"}, + PprofBindAddress: "0", + }) + Expect(err).ToNot(HaveOccurred()) + Expect(m2).ToNot(BeNil()) + Expect(rl.Describe()).To(Equal("default/test-leader-election-id")) + + m1done := make(chan struct{}) + 
Expect(m1.Add(RunnableFunc(func(ctx context.Context) error { + defer GinkgoRecover() + close(m1done) + return nil + }))).To(Succeed()) + + go func() { + defer GinkgoRecover() + Expect(m1.Elected()).ShouldNot(BeClosed()) + Expect(m1.Start(specCtx)).NotTo(HaveOccurred()) + }() + <-m1.Elected() + <-m1done + + electionRunnable := &needElection{make(chan struct{})} + + Expect(m2.Add(electionRunnable)).To(Succeed()) + + ctx2, cancel2 := context.WithCancel(specCtx) + m2done := make(chan struct{}) + go func() { + defer GinkgoRecover() + Expect(m2.Start(ctx2)).NotTo(HaveOccurred()) + close(m2done) + }() + Consistently(m2.Elected()).ShouldNot(Receive()) + + go func() { + defer GinkgoRecover() + Consistently(electionRunnable.ch).ShouldNot(Receive()) + }() + cancel2() + <-m2done + }) + + It("should default RenewDeadline for leader election config", func() { + var rl resourcelock.Interface + m1, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionNamespace: "default", + LeaderElectionID: "test-leader-election-id", + newResourceLock: func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) { + if options.RenewDeadline != 10*time.Second { + return nil, fmt.Errorf("expected RenewDeadline to be 10s, got %v", options.RenewDeadline) + } + var err error + rl, err = leaderelection.NewResourceLock(config, recorderProvider, options) + return rl, err + }, + HealthProbeBindAddress: "0", + Metrics: metricsserver.Options{BindAddress: "0"}, + PprofBindAddress: "0", + }) + Expect(err).ToNot(HaveOccurred()) + Expect(m1).ToNot(BeNil()) + }) + + It("should default ID to controller-runtime if ID is not set", func(specCtx SpecContext) { + var rl resourcelock.Interface + m1, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionNamespace: "default", + LeaderElectionID: "test-leader-election-id", + newResourceLock: func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) 
(resourcelock.Interface, error) { + var err error + rl, err = leaderelection.NewResourceLock(config, recorderProvider, options) + return rl, err + }, + HealthProbeBindAddress: "0", + Metrics: metricsserver.Options{BindAddress: "0"}, + PprofBindAddress: "0", + }) + Expect(err).ToNot(HaveOccurred()) + Expect(m1).ToNot(BeNil()) + Expect(rl.Describe()).To(Equal("default/test-leader-election-id")) + + m1cm, ok := m1.(*controllerManager) + Expect(ok).To(BeTrue()) + m1cm.onStoppedLeading = func() {} + + m2, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionNamespace: "default", + LeaderElectionID: "test-leader-election-id", + newResourceLock: func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) { + var err error + rl, err = leaderelection.NewResourceLock(config, recorderProvider, options) + return rl, err + }, + HealthProbeBindAddress: "0", + Metrics: metricsserver.Options{BindAddress: "0"}, + PprofBindAddress: "0", }) - Expect(m).ToNot(BeNil()) Expect(err).ToNot(HaveOccurred()) - Expect(rl.Describe()).To(Equal("default/controller-leader-election-helper")) + Expect(m2).ToNot(BeNil()) + Expect(rl.Describe()).To(Equal("default/test-leader-election-id")) + + m2cm, ok := m2.(*controllerManager) + Expect(ok).To(BeTrue()) + m2cm.onStoppedLeading = func() {} + + c1 := make(chan struct{}) + Expect(m1.Add(RunnableFunc(func(ctx context.Context) error { + defer GinkgoRecover() + close(c1) + return nil + }))).To(Succeed()) + + go func() { + defer GinkgoRecover() + Expect(m1.Elected()).ShouldNot(BeClosed()) + Expect(m1.Start(specCtx)).NotTo(HaveOccurred()) + }() + <-m1.Elected() + <-c1 + + c2 := make(chan struct{}) + Expect(m2.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + close(c2) + return nil + }))).To(Succeed()) + + ctx2, cancel := context.WithCancel(specCtx) + m2done := make(chan struct{}) + go func() { + defer GinkgoRecover() + 
Expect(m2.Start(ctx2)).NotTo(HaveOccurred()) + close(m2done) + }() + Consistently(m2.Elected()).ShouldNot(Receive()) + + Consistently(c2).ShouldNot(Receive()) + cancel() + <-m2done + }) + + It("should return an error if it can't create a ResourceLock", func() { + m, err := New(cfg, Options{ + newResourceLock: func(_ *rest.Config, _ recorder.Provider, _ leaderelection.Options) (resourcelock.Interface, error) { + return nil, fmt.Errorf("expected error") + }, + }) + Expect(m).To(BeNil()) + Expect(err).To(MatchError(ContainSubstring("expected error"))) }) It("should return an error if namespace not set and not running in cluster", func() { @@ -160,15 +431,215 @@ var _ = Describe("manger.Manager", func() { Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("unable to find leader election namespace: not running in-cluster, please specify LeaderElectionNamespace")) }) + + // We must keep this default until we are sure all controller-runtime users have upgraded from the original default + // ConfigMap lock to a controller-runtime version that has this new default. Many users of controller-runtime skip + // versions, so we should be extremely conservative here. 
+ It("should default to LeasesResourceLock", func() { + m, err := New(cfg, Options{LeaderElection: true, LeaderElectionID: "controller-runtime", LeaderElectionNamespace: "my-ns"}) + Expect(m).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + cm, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + _, isLeaseLock := cm.resourceLock.(*resourcelock.LeaseLock) + Expect(isLeaseLock).To(BeTrue()) + + }) + It("should use the specified ResourceLock", func() { + m, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionResourceLock: resourcelock.LeasesResourceLock, + LeaderElectionID: "controller-runtime", + LeaderElectionNamespace: "my-ns", + }) + Expect(m).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + cm, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + _, isLeaseLock := cm.resourceLock.(*resourcelock.LeaseLock) + Expect(isLeaseLock).To(BeTrue()) + }) + It("should release lease if ElectionReleaseOnCancel is true", func(specCtx SpecContext) { + var rl resourcelock.Interface + m, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionResourceLock: resourcelock.LeasesResourceLock, + LeaderElectionID: "controller-runtime", + LeaderElectionNamespace: "my-ns", + LeaderElectionReleaseOnCancel: true, + newResourceLock: func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) { + var err error + rl, err = fakeleaderelection.NewResourceLock(config, recorderProvider, options) + return rl, err + }, + }) + Expect(err).ToNot(HaveOccurred()) + + ctx, cancel := context.WithCancel(specCtx) + doneCh := make(chan struct{}) + go func() { + defer GinkgoRecover() + defer close(doneCh) + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.(*controllerManager).elected + cancel() + <-doneCh + + record, _, err := rl.Get(specCtx) + Expect(err).ToNot(HaveOccurred()) + Expect(record.HolderIdentity).To(BeEmpty()) + }) + It("should set the leaselocks's label field when LeaderElectionLabels 
is set", func() { + labels := map[string]string{"my-key": "my-val"} + m, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionResourceLock: resourcelock.LeasesResourceLock, + LeaderElectionID: "controller-runtime", + LeaderElectionNamespace: "default", + LeaderElectionLabels: labels, + }) + Expect(err).ToNot(HaveOccurred()) + Expect(m).ToNot(BeNil()) + cm, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + ll, isLeaseLock := cm.resourceLock.(*resourcelock.LeaseLock) + Expect(isLeaseLock).To(BeTrue()) + val, exists := ll.Labels["my-key"] + Expect(exists).To(BeTrue()) + Expect(val).To(Equal("my-val")) + }) + When("using a custom LeaderElectionResourceLockInterface", func() { + It("should use the custom LeaderElectionResourceLockInterface", func() { + rl, err := fakeleaderelection.NewResourceLock(nil, nil, leaderelection.Options{}) + Expect(err).NotTo(HaveOccurred()) + + m, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionResourceLockInterface: rl, + newResourceLock: func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) { + return nil, fmt.Errorf("this should not be called") + }, + }) + Expect(m).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + cm, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + Expect(cm.resourceLock).To(Equal(rl)) + }) + }) + }) + + It("should create a metrics server if a valid address is provided", func(specCtx SpecContext) { + var srv metricsserver.Server + m, err := New(cfg, Options{ + Metrics: metricsserver.Options{BindAddress: ":0"}, + newMetricsServer: func(options metricsserver.Options, config *rest.Config, httpClient *http.Client) (metricsserver.Server, error) { + var err error + srv, err = metricsserver.NewServer(options, config, httpClient) + return srv, err + }, + }) + Expect(m).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + Expect(srv).ToNot(BeNil()) + + // Triggering the metric server start here manually to test 
if it works. + // Usually this happens later during manager.Start(). + ctx, cancel := context.WithTimeout(specCtx, 5*time.Second) + Expect(srv.Start(ctx)).To(Succeed()) + cancel() + }) + + It("should create a metrics server if a valid address is provided and secure serving is enabled", func(specCtx SpecContext) { + var srv metricsserver.Server + m, err := New(cfg, Options{ + Metrics: metricsserver.Options{BindAddress: ":0", SecureServing: true}, + newMetricsServer: func(options metricsserver.Options, config *rest.Config, httpClient *http.Client) (metricsserver.Server, error) { + var err error + srv, err = metricsserver.NewServer(options, config, httpClient) + return srv, err + }, + }) + Expect(m).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + Expect(srv).ToNot(BeNil()) + + // Triggering the metric server start here manually to test if it works. + // Usually this happens later during manager.Start(). + ctx, cancel := context.WithTimeout(specCtx, 5*time.Second) + Expect(srv.Start(ctx)).To(Succeed()) + cancel() }) - It("should create a listener for the metrics if a valid address is provided", func() { + It("should be able to create a manager with a cache that fails on missing informer", func() { + m, err := New(cfg, Options{ + Cache: cache.Options{ + ReaderFailOnMissingInformer: true, + }, + }) + Expect(m).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + }) + + It("should return an error if the metrics bind address is already in use", func(ctx SpecContext) { + ln, err := net.Listen("tcp", ":0") + Expect(err).ShouldNot(HaveOccurred()) + + var srv metricsserver.Server + m, err := New(cfg, Options{ + Metrics: metricsserver.Options{ + BindAddress: ln.Addr().String(), + }, + newMetricsServer: func(options metricsserver.Options, config *rest.Config, httpClient *http.Client) (metricsserver.Server, error) { + var err error + srv, err = metricsserver.NewServer(options, config, httpClient) + return srv, err + }, + }) + Expect(m).ToNot(BeNil()) + 
Expect(err).ToNot(HaveOccurred()) + + // Triggering the metric server start here manually to test if it works. + // Usually this happens later during manager.Start(). + Expect(srv.Start(ctx)).ToNot(Succeed()) + + Expect(ln.Close()).To(Succeed()) + }) + + It("should return an error if the metrics bind address is already in use and secure serving enabled", func(ctx SpecContext) { + ln, err := net.Listen("tcp", ":0") + Expect(err).ShouldNot(HaveOccurred()) + + var srv metricsserver.Server + m, err := New(cfg, Options{ + Metrics: metricsserver.Options{ + BindAddress: ln.Addr().String(), + SecureServing: true, + }, + newMetricsServer: func(options metricsserver.Options, config *rest.Config, httpClient *http.Client) (metricsserver.Server, error) { + var err error + srv, err = metricsserver.NewServer(options, config, httpClient) + return srv, err + }, + }) + Expect(m).ToNot(BeNil()) + Expect(err).ToNot(HaveOccurred()) + + // Triggering the metric server start here manually to test if it works. + // Usually this happens later during manager.Start(). 
+ Expect(srv.Start(ctx)).ToNot(Succeed()) + + Expect(ln.Close()).To(Succeed()) + }) + + It("should create a listener for the health probes if a valid address is provided", func() { var listener net.Listener m, err := New(cfg, Options{ - MetricsBindAddress: ":0", - newMetricsListener: func(addr string) (net.Listener, error) { + HealthProbeBindAddress: ":0", + newHealthProbeListener: func(addr string) (net.Listener, error) { var err error - listener, err = metrics.NewListener(addr) + listener, err = defaultHealthProbeListener(addr) return listener, err }, }) @@ -178,16 +649,16 @@ var _ = Describe("manger.Manager", func() { Expect(listener.Close()).ToNot(HaveOccurred()) }) - It("should return an error if the metrics bind address is already in use", func() { - ln, err := metrics.NewListener(":0") + It("should return an error if the health probes bind address is already in use", func() { + ln, err := defaultHealthProbeListener(":0") Expect(err).ShouldNot(HaveOccurred()) var listener net.Listener m, err := New(cfg, Options{ - MetricsBindAddress: ln.Addr().String(), - newMetricsListener: func(addr string) (net.Listener, error) { + HealthProbeBindAddress: ln.Addr().String(), + newHealthProbeListener: func(addr string) (net.Listener, error) { var err error - listener, err = metrics.NewListener(addr) + listener, err = defaultHealthProbeListener(addr) return listener, err }, }) @@ -200,94 +671,592 @@ var _ = Describe("manger.Manager", func() { }) Describe("Start", func() { - var startSuite = func(options Options) { - It("should Start each Component", func(done Done) { + var startSuite = func(options Options, callbacks ...func(Manager)) { + It("should Start each Component", func(ctx SpecContext) { m, err := New(cfg, options) Expect(err).NotTo(HaveOccurred()) - c1 := make(chan struct{}) - Expect(m.Add(RunnableFunc(func(s <-chan struct{}) error { - defer close(c1) + for _, cb := range callbacks { + cb(m) + } + var wgRunnableStarted sync.WaitGroup + wgRunnableStarted.Add(2) + 
Expect(m.Add(RunnableFunc(func(context.Context) error { defer GinkgoRecover() + wgRunnableStarted.Done() return nil }))).To(Succeed()) - c2 := make(chan struct{}) - Expect(m.Add(RunnableFunc(func(s <-chan struct{}) error { - defer close(c2) + Expect(m.Add(RunnableFunc(func(context.Context) error { defer GinkgoRecover() + wgRunnableStarted.Done() return nil }))).To(Succeed()) go func() { defer GinkgoRecover() - Expect(m.Start(stop)).NotTo(HaveOccurred()) + Expect(m.Elected()).ShouldNot(BeClosed()) + Expect(m.Start(ctx)).NotTo(HaveOccurred()) }() - <-c1 - <-c2 - close(done) + <-m.Elected() + wgRunnableStarted.Wait() + }) + + It("should not manipulate the provided config", func() { + // strip WrapTransport, cause func values are PartialEq, not Eq -- + // specifically, for reflect.DeepEqual, for all functions F, + // F != nil implies F != F, which means no full equivalence relation. + cfg := rest.CopyConfig(cfg) + cfg.WrapTransport = nil + originalCfg := rest.CopyConfig(cfg) + // The options object is shared by multiple tests, copy it + // into our scope so we manipulate it for this testcase only + options := options + options.newResourceLock = nil + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + Expect(m.GetConfig()).To(Equal(originalCfg)) + }) + + It("should stop when context is cancelled", func(specCtx SpecContext) { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + ctx, cancel := context.WithCancel(specCtx) + cancel() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }) + + It("should return an error if it can't start the cache", func(ctx SpecContext) { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + mgr, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + Expect(mgr.Add( + &cacheProvider{cache: &informertest.FakeInformers{Error: fmt.Errorf("expected error")}}, + 
)).To(Succeed()) + + Expect(m.Start(ctx)).To(MatchError(ContainSubstring("expected error"))) + }) + + It("should start the cache before starting anything else", func(ctx SpecContext) { + fakeCache := &startSignalingInformer{Cache: &informertest.FakeInformers{}} + options.NewCache = func(_ *rest.Config, _ cache.Options) (cache.Cache, error) { + return fakeCache, nil + } + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + + runnableWasStarted := make(chan struct{}) + runnable := RunnableFunc(func(ctx context.Context) error { + defer GinkgoRecover() + if !fakeCache.wasSynced { + return errors.New("runnable got started before cache was synced") + } + close(runnableWasStarted) + return nil + }) + Expect(m.Add(runnable)).To(Succeed()) + + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).ToNot(HaveOccurred()) + }() + + <-runnableWasStarted + }) + + It("should start additional clusters before anything else", func(ctx SpecContext) { + fakeCache := &startSignalingInformer{Cache: &informertest.FakeInformers{}} + options.NewCache = func(_ *rest.Config, _ cache.Options) (cache.Cache, error) { + return fakeCache, nil + } + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + + additionalClusterCache := &startSignalingInformer{Cache: &informertest.FakeInformers{}} + additionalCluster, err := cluster.New(cfg, func(o *cluster.Options) { + o.NewCache = func(_ *rest.Config, _ cache.Options) (cache.Cache, error) { + return additionalClusterCache, nil + } + }) + Expect(err).NotTo(HaveOccurred()) + Expect(m.Add(additionalCluster)).NotTo(HaveOccurred()) + + runnableWasStarted := make(chan struct{}) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + defer GinkgoRecover() + if !fakeCache.wasSynced { + return errors.New("WaitForCacheSyncCalled wasn't called before Runnable got started") + } + if !additionalClusterCache.wasSynced { + return 
errors.New("the additional clusters WaitForCacheSync wasn't called before Runnable got started") + } + close(runnableWasStarted) + return nil + }))).To(Succeed()) + + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).ToNot(HaveOccurred()) + }() + + <-runnableWasStarted + }) + + It("should return an error if any Components fail to Start", func(ctx SpecContext) { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + defer GinkgoRecover() + <-ctx.Done() + return nil + }))).To(Succeed()) + + Expect(m.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + return fmt.Errorf("expected error") + }))).To(Succeed()) + + Expect(m.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + return nil + }))).To(Succeed()) + + err = m.Start(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("expected error")) + }) + + It("should start caches added after Manager has started", func(ctx SpecContext) { + fakeCache := &startSignalingInformer{Cache: &informertest.FakeInformers{}} + options.NewCache = func(_ *rest.Config, _ cache.Options) (cache.Cache, error) { + return fakeCache, nil + } + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + + runnableWasStarted := make(chan struct{}) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + defer GinkgoRecover() + if !fakeCache.wasSynced { + return errors.New("WaitForCacheSyncCalled wasn't called before Runnable got started") + } + close(runnableWasStarted) + return nil + }))).To(Succeed()) + + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).ToNot(HaveOccurred()) + }() + + <-runnableWasStarted + + additionalClusterCache := &startSignalingInformer{Cache: &informertest.FakeInformers{}} + fakeCluster := &startClusterAfterManager{informer: additionalClusterCache} + + 
Expect(err).NotTo(HaveOccurred()) + Expect(m.Add(fakeCluster)).NotTo(HaveOccurred()) + + Eventually(func() bool { + fakeCluster.informer.mu.Lock() + defer fakeCluster.informer.mu.Unlock() + return fakeCluster.informer.wasStarted && fakeCluster.informer.wasSynced + }).Should(BeTrue()) + }) + + It("should wait for runnables to stop", func(specCtx SpecContext) { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + + var lock sync.Mutex + var runnableDoneCount int64 + runnableDoneFunc := func() { + lock.Lock() + defer lock.Unlock() + atomic.AddInt64(&runnableDoneCount, 1) + } + var wgRunnableRunning sync.WaitGroup + wgRunnableRunning.Add(2) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + wgRunnableRunning.Done() + defer GinkgoRecover() + defer runnableDoneFunc() + <-ctx.Done() + return nil + }))).To(Succeed()) + + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + wgRunnableRunning.Done() + defer GinkgoRecover() + defer runnableDoneFunc() + <-ctx.Done() + time.Sleep(300 * time.Millisecond) // slow closure simulation + return nil + }))).To(Succeed()) + + defer GinkgoRecover() + ctx, cancel := context.WithCancel(specCtx) + + var wgManagerRunning sync.WaitGroup + wgManagerRunning.Add(1) + go func() { + defer GinkgoRecover() + defer wgManagerRunning.Done() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + Eventually(func() int64 { + return atomic.LoadInt64(&runnableDoneCount) + }).Should(BeEquivalentTo(2)) + }() + wgRunnableRunning.Wait() + cancel() + + wgManagerRunning.Wait() + }) + + It("should return an error if any Components fail to Start and wait for runnables to stop", func(ctx SpecContext) { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + defer GinkgoRecover() + var lock sync.Mutex + runnableDoneCount := 0 + runnableDoneFunc := func() { + lock.Lock() + defer lock.Unlock() + runnableDoneCount++ + } + + 
Expect(m.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + defer runnableDoneFunc() + return fmt.Errorf("expected error") + }))).To(Succeed()) + + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + defer GinkgoRecover() + defer runnableDoneFunc() + <-ctx.Done() + return nil + }))).To(Succeed()) + + Expect(m.Start(ctx)).To(HaveOccurred()) + Expect(runnableDoneCount).To(Equal(2)) + }) + + It("should refuse to add runnable if stop procedure is already engaged", func(specCtx SpecContext) { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + defer GinkgoRecover() + + var wgRunnableRunning sync.WaitGroup + wgRunnableRunning.Add(1) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + wgRunnableRunning.Done() + defer GinkgoRecover() + <-ctx.Done() + return nil + }))).To(Succeed()) + + ctx, cancel := context.WithCancel(specCtx) + go func() { + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + wgRunnableRunning.Wait() + cancel() + time.Sleep(100 * time.Millisecond) // give some time for the stop chan closure to be caught by the manager + Expect(m.Add(RunnableFunc(func(context.Context) error { + defer GinkgoRecover() + return nil + }))).NotTo(Succeed()) + }) + + It("should not return runnables context.Canceled errors", func(specCtx SpecContext) { + Expect(options.Logger).To(BeZero(), "this test overrides Logger") + + var log struct { + sync.Mutex + messages []string + } + options.Logger = funcr.NewJSON(func(object string) { + log.Lock() + log.messages = append(log.messages, object) + log.Unlock() + }, funcr.Options{}) + + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + + // Runnables may return ctx.Err() as shown in some [context.Context] examples. 
+ started := make(chan struct{}) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + close(started) + <-ctx.Done() + return ctx.Err() + }))).To(Succeed()) + + stopped := make(chan error) + ctx, cancel := context.WithCancel(specCtx) + go func() { + stopped <- m.Start(ctx) + }() + + // Wait for runnables to start, signal the manager, and wait for it to return. + <-started + cancel() + Expect(<-stopped).To(Succeed()) + + // The leader election goroutine emits one more log message after Start() returns. + // Take the lock here to avoid a race between it writing to log.messages and the + // following read from log.messages. + if options.LeaderElection { + log.Lock() + defer log.Unlock() + } + + Expect(log.messages).To(Not(ContainElement( + ContainSubstring(context.Canceled.Error()), + ))) + }) + + It("should default controller logger from manager logger", func(specCtx SpecContext) { + var lock sync.Mutex + var messages []string + options.Logger = funcr.NewJSON(func(object string) { + lock.Lock() + messages = append(messages, object) + lock.Unlock() + }, funcr.Options{}) + options.LeaderElection = false + + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + + started := make(chan struct{}) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + close(started) + return nil + }))).To(Succeed()) + + stopped := make(chan error) + ctx, cancel := context.WithCancel(specCtx) + go func() { + stopped <- m.Start(ctx) + }() + + // Wait for runnables to start as a proxy for the manager being fully started. 
+ <-started + cancel() + Expect(<-stopped).To(Succeed()) + + msg := "controller log message" + m.GetControllerOptions().Logger.Info(msg) + + Eventually(func(g Gomega) { + lock.Lock() + defer lock.Unlock() + + g.Expect(messages).To(ContainElement( + ContainSubstring(msg), + )) + }).Should(Succeed()) + }) + + It("should return both runnables and stop errors when both error", func(ctx SpecContext) { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + m.(*controllerManager).gracefulShutdownTimeout = 1 * time.Nanosecond + Expect(m.Add(RunnableFunc(func(context.Context) error { + return runnableError{} + }))).To(Succeed()) + testDone := make(chan struct{}) + defer close(testDone) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + <-ctx.Done() + timer := time.NewTimer(30 * time.Second) + defer timer.Stop() + select { + case <-testDone: + return nil + case <-timer.C: + return nil + } + }))).To(Succeed()) + err = m.Start(ctx) + Expect(err).To(HaveOccurred()) + eMsg := "[not feeling like that, failed waiting for all runnables to end within grace period of 1ns: context deadline exceeded]" + Expect(err.Error()).To(Equal(eMsg)) + Expect(errors.Is(err, context.DeadlineExceeded)).To(BeTrue()) + Expect(errors.Is(err, runnableError{})).To(BeTrue()) + }) + + It("should return only stop errors if runnables dont error", func(specCtx SpecContext) { + m, err := New(cfg, options) + Expect(err).NotTo(HaveOccurred()) + for _, cb := range callbacks { + cb(m) + } + m.(*controllerManager).gracefulShutdownTimeout = 1 * time.Nanosecond + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + <-ctx.Done() + return nil + }))).To(Succeed()) + testDone := make(chan struct{}) + defer close(testDone) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + <-ctx.Done() + timer := time.NewTimer(30 * time.Second) + defer timer.Stop() + select { + case <-testDone: + return nil + case <-timer.C: + return nil + } + 
}))).To(Succeed()) + ctx, cancel := context.WithCancel(specCtx) + managerStopDone := make(chan struct{}) + go func() { err = m.Start(ctx); close(managerStopDone) }() + // Use the 'elected' channel to find out if startup was done, otherwise we stop + // before we started the Runnable and see flakes, mostly in low-CPU envs like CI + <-m.(*controllerManager).elected + cancel() + <-managerStopDone + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("failed waiting for all runnables to end within grace period of 1ns: context deadline exceeded")) + Expect(errors.Is(err, context.DeadlineExceeded)).To(BeTrue()) + Expect(errors.Is(err, runnableError{})).ToNot(BeTrue()) }) - It("should stop when stop is called", func(done Done) { + It("should return only runnables error if stop doesn't error", func(ctx SpecContext) { m, err := New(cfg, options) Expect(err).NotTo(HaveOccurred()) - s := make(chan struct{}) - close(s) - Expect(m.Start(s)).NotTo(HaveOccurred()) - - close(done) + for _, cb := range callbacks { + cb(m) + } + Expect(m.Add(RunnableFunc(func(context.Context) error { + return runnableError{} + }))).To(Succeed()) + err = m.Start(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("not feeling like that")) + Expect(errors.Is(err, context.DeadlineExceeded)).ToNot(BeTrue()) + Expect(errors.Is(err, runnableError{})).To(BeTrue()) }) - It("should return an error if it can't start the cache", func(done Done) { + It("should not wait for runnables if gracefulShutdownTimeout is 0", func(specCtx SpecContext) { m, err := New(cfg, options) Expect(err).NotTo(HaveOccurred()) - mgr, ok := m.(*controllerManager) - Expect(ok).To(BeTrue()) - mgr.startCache = func(stop <-chan struct{}) error { - return fmt.Errorf("expected error") + for _, cb := range callbacks { + cb(m) } - Expect(m.Start(stop).Error()).To(ContainSubstring("expected error")) + m.(*controllerManager).gracefulShutdownTimeout = time.Duration(0) + + runnableStopped := make(chan struct{}) + 
Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + <-ctx.Done() + time.Sleep(100 * time.Millisecond) + close(runnableStopped) + return nil + }))).ToNot(HaveOccurred()) + + ctx, cancel := context.WithCancel(specCtx) + managerStopDone := make(chan struct{}) + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + close(managerStopDone) + }() + <-m.Elected() + cancel() - close(done) + <-managerStopDone + <-runnableStopped }) - It("should return an error if any Components fail to Start", func(done Done) { + It("should wait forever for runnables if gracefulShutdownTimeout is <0 (-1)", func(specCtx SpecContext) { m, err := New(cfg, options) Expect(err).NotTo(HaveOccurred()) - c1 := make(chan struct{}) - Expect(m.Add(RunnableFunc(func(s <-chan struct{}) error { - defer GinkgoRecover() - defer close(c1) - return nil - }))).To(Succeed()) - - c2 := make(chan struct{}) - Expect(m.Add(RunnableFunc(func(s <-chan struct{}) error { - defer GinkgoRecover() - defer close(c2) - return fmt.Errorf("expected error") - }))).To(Succeed()) + for _, cb := range callbacks { + cb(m) + } + m.(*controllerManager).gracefulShutdownTimeout = time.Duration(-1) - c3 := make(chan struct{}) - Expect(m.Add(RunnableFunc(func(s <-chan struct{}) error { - defer GinkgoRecover() - defer close(c3) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + <-ctx.Done() + time.Sleep(100 * time.Millisecond) return nil - }))).To(Succeed()) + }))).ToNot(HaveOccurred()) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + <-ctx.Done() + time.Sleep(200 * time.Millisecond) + return nil + }))).ToNot(HaveOccurred()) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + <-ctx.Done() + time.Sleep(500 * time.Millisecond) + return nil + }))).ToNot(HaveOccurred()) + Expect(m.Add(RunnableFunc(func(ctx context.Context) error { + <-ctx.Done() + time.Sleep(1500 * time.Millisecond) + return nil + }))).ToNot(HaveOccurred()) + ctx, cancel := 
context.WithCancel(specCtx) + managerStopDone := make(chan struct{}) go func() { defer GinkgoRecover() - Expect(m.Start(stop)).NotTo(HaveOccurred()) - close(done) + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + close(managerStopDone) }() - <-c1 - <-c2 - <-c3 - }) + <-m.Elected() + cancel() - It("should return an error if any non-leaderelection Components fail to Start", func() { - // TODO(mengqiy): implement this after resolving https://github.com/kubernetes-sigs/controller-runtime/issues/429 + beforeDone := time.Now() + <-managerStopDone + Expect(time.Since(beforeDone)).To(BeNumerically(">=", 1500*time.Millisecond)) }) + } Context("with defaults", func() { @@ -295,101 +1264,129 @@ var _ = Describe("manger.Manager", func() { }) Context("with leaderelection enabled", func() { - startSuite(Options{ - LeaderElection: true, - LeaderElectionID: "controller-runtime", - LeaderElectionNamespace: "default", - newResourceLock: fakeleaderelection.NewResourceLock, + startSuite( + Options{ + LeaderElection: true, + LeaderElectionID: "controller-runtime", + LeaderElectionNamespace: "default", + newResourceLock: fakeleaderelection.NewResourceLock, + }, + func(m Manager) { + cm, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + cm.onStoppedLeading = func() {} + }, + ) + + It("should return an error if leader election param incorrect", func(specCtx SpecContext) { + renewDeadline := time.Second * 20 + m, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionID: "controller-runtime", + LeaderElectionNamespace: "default", + newResourceLock: fakeleaderelection.NewResourceLock, + RenewDeadline: &renewDeadline, + }) + Expect(err).NotTo(HaveOccurred()) + ctx, cancel := context.WithTimeout(specCtx, time.Second*10) + defer cancel() + err = m.Start(ctx) + Expect(err).To(HaveOccurred()) + Expect(errors.Is(err, context.DeadlineExceeded)).NotTo(BeTrue()) }) }) Context("should start serving metrics", func() { - var listener net.Listener + var srv 
metricsserver.Server + var defaultServer metricsDefaultServer var opts Options BeforeEach(func() { - listener = nil + srv = nil opts = Options{ - newMetricsListener: func(addr string) (net.Listener, error) { + Metrics: metricsserver.Options{ + BindAddress: ":0", + }, + newMetricsServer: func(options metricsserver.Options, config *rest.Config, httpClient *http.Client) (metricsserver.Server, error) { var err error - listener, err = metrics.NewListener(addr) - return listener, err + srv, err = metricsserver.NewServer(options, config, httpClient) + if srv != nil { + defaultServer = srv.(metricsDefaultServer) + } + return srv, err }, } }) - AfterEach(func() { - if listener != nil { - listener.Close() - } - }) - - It("should stop serving metrics when stop is called", func(done Done) { - opts.MetricsBindAddress = ":0" + It("should stop serving metrics when stop is called", func(specCtx SpecContext) { m, err := New(cfg, opts) Expect(err).NotTo(HaveOccurred()) - s := make(chan struct{}) + ctx, cancel := context.WithCancel(specCtx) go func() { defer GinkgoRecover() - Expect(m.Start(s)).NotTo(HaveOccurred()) - close(done) + Expect(m.Start(ctx)).NotTo(HaveOccurred()) }() + <-m.Elected() + // Note: Wait until metrics server has been started. A finished leader election + // doesn't guarantee that the metrics server is up. 
+ Eventually(func() string { return defaultServer.GetBindAddr() }, 10*time.Second).ShouldNot(BeEmpty()) // Check the metrics started - endpoint := fmt.Sprintf("http://%s", listener.Addr().String()) + endpoint := fmt.Sprintf("http://%s/metrics", defaultServer.GetBindAddr()) _, err = http.Get(endpoint) Expect(err).NotTo(HaveOccurred()) // Shutdown the server - close(s) + cancel() // Expect the metrics server to shutdown Eventually(func() error { _, err = http.Get(endpoint) return err - }).ShouldNot(Succeed()) + }, 10*time.Second).ShouldNot(Succeed()) }) - It("should serve metrics endpoint", func(done Done) { - opts.MetricsBindAddress = ":0" + It("should serve metrics endpoint", func(ctx SpecContext) { m, err := New(cfg, opts) Expect(err).NotTo(HaveOccurred()) - s := make(chan struct{}) - defer close(s) go func() { defer GinkgoRecover() - Expect(m.Start(s)).NotTo(HaveOccurred()) - close(done) + Expect(m.Start(ctx)).NotTo(HaveOccurred()) }() + <-m.Elected() + // Note: Wait until metrics server has been started. A finished leader election + // doesn't guarantee that the metrics server is up. 
+ Eventually(func() string { return defaultServer.GetBindAddr() }, 10*time.Second).ShouldNot(BeEmpty()) - metricsEndpoint := fmt.Sprintf("http://%s/metrics", listener.Addr().String()) + metricsEndpoint := fmt.Sprintf("http://%s/metrics", defaultServer.GetBindAddr()) resp, err := http.Get(metricsEndpoint) Expect(err).NotTo(HaveOccurred()) Expect(resp.StatusCode).To(Equal(200)) }) - It("should not serve anything other than metrics endpoint", func(done Done) { - opts.MetricsBindAddress = ":0" + It("should not serve anything other than metrics endpoint by default", func(ctx SpecContext) { m, err := New(cfg, opts) Expect(err).NotTo(HaveOccurred()) - s := make(chan struct{}) - defer close(s) go func() { defer GinkgoRecover() - Expect(m.Start(s)).NotTo(HaveOccurred()) - close(done) + Expect(m.Start(ctx)).NotTo(HaveOccurred()) }() + <-m.Elected() + // Note: Wait until metrics server has been started. A finished leader election + // doesn't guarantee that the metrics server is up. + Eventually(func() string { return defaultServer.GetBindAddr() }, 10*time.Second).ShouldNot(BeEmpty()) - endpoint := fmt.Sprintf("http://%s/should-not-exist", listener.Addr().String()) + endpoint := fmt.Sprintf("http://%s/should-not-exist", defaultServer.GetBindAddr()) resp, err := http.Get(endpoint) Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() Expect(resp.StatusCode).To(Equal(404)) }) - It("should serve metrics in its registry", func(done Done) { + It("should serve metrics in its registry", func(ctx SpecContext) { one := prometheus.NewCounter(prometheus.CounterOpts{ Name: "test_one", Help: "test metric for testing", @@ -398,24 +1395,25 @@ var _ = Describe("manger.Manager", func() { err := metrics.Registry.Register(one) Expect(err).NotTo(HaveOccurred()) - opts.MetricsBindAddress = ":0" m, err := New(cfg, opts) Expect(err).NotTo(HaveOccurred()) - s := make(chan struct{}) - defer close(s) go func() { defer GinkgoRecover() - Expect(m.Start(s)).NotTo(HaveOccurred()) - close(done) + 
Expect(m.Start(ctx)).NotTo(HaveOccurred()) }() + <-m.Elected() + // Note: Wait until metrics server has been started. A finished leader election + // doesn't guarantee that the metrics server is up. + Eventually(func() string { return defaultServer.GetBindAddr() }, 10*time.Second).ShouldNot(BeEmpty()) - metricsEndpoint := fmt.Sprintf("http://%s/metrics", listener.Addr().String()) + metricsEndpoint := fmt.Sprintf("http://%s/metrics", defaultServer.GetBindAddr()) resp, err := http.Get(metricsEndpoint) Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() Expect(resp.StatusCode).To(Equal(200)) - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) Expect(err).NotTo(HaveOccurred()) Expect(string(data)).To(ContainSubstring("%s\n%s\n%s\n", `# HELP test_one test metric for testing`, @@ -427,12 +1425,293 @@ var _ = Describe("manger.Manager", func() { ok := metrics.Registry.Unregister(one) Expect(ok).To(BeTrue()) }) + + It("should serve extra endpoints", func(ctx SpecContext) { + opts.Metrics.ExtraHandlers = map[string]http.Handler{ + "/debug": http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + _, _ = w.Write([]byte("Some debug info")) + }), + } + m, err := New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + // Should error when we add another extra endpoint on the already registered path. + err = m.AddMetricsServerExtraHandler("/debug", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + _, _ = w.Write([]byte("Another debug info")) + })) + Expect(err).To(HaveOccurred()) + + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + // Note: Wait until metrics server has been started. A finished leader election + // doesn't guarantee that the metrics server is up. 
+ Eventually(func() string { return defaultServer.GetBindAddr() }, 10*time.Second).ShouldNot(BeEmpty()) + + endpoint := fmt.Sprintf("http://%s/debug", defaultServer.GetBindAddr()) + resp, err := http.Get(endpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + body, err := io.ReadAll(resp.Body) + Expect(err).NotTo(HaveOccurred()) + Expect(string(body)).To(Equal("Some debug info")) + }) + }) + }) + + Context("should start serving health probes", func() { + var listener net.Listener + var opts Options + + BeforeEach(func() { + listener = nil + opts = Options{ + newHealthProbeListener: func(addr string) (net.Listener, error) { + var err error + listener, err = defaultHealthProbeListener(addr) + return listener, err + }, + } + }) + + AfterEach(func() { + if listener != nil { + listener.Close() + } + }) + + It("should stop serving health probes when stop is called", func(specCtx SpecContext) { + opts.HealthProbeBindAddress = ":0" + m, err := New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + ctx, cancel := context.WithCancel(specCtx) + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + + // Check the health probes started + endpoint := fmt.Sprintf("http://%s", listener.Addr().String()) + _, err = http.Get(endpoint) + Expect(err).NotTo(HaveOccurred()) + + // Shutdown the server + cancel() + + // Expect the health probes server to shutdown + Eventually(func() error { + _, err = http.Get(endpoint) + return err + }, 10*time.Second).ShouldNot(Succeed()) + }) + + It("should serve readiness endpoint", func(ctx SpecContext) { + opts.HealthProbeBindAddress = ":0" + m, err := New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + res := fmt.Errorf("not ready yet") + namedCheck := "check" + err = m.AddReadyzCheck(namedCheck, func(_ *http.Request) error { return res }) + Expect(err).NotTo(HaveOccurred()) + + go func() { + defer GinkgoRecover() + 
Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + + readinessEndpoint := fmt.Sprint("http://", listener.Addr().String(), defaultReadinessEndpoint) + + // Controller is not ready + resp, err := http.Get(readinessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusInternalServerError)) + + // Controller is ready + res = nil + resp, err = http.Get(readinessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + // Check readiness path without trailing slash without redirect + readinessEndpoint = fmt.Sprint("http://", listener.Addr().String(), defaultReadinessEndpoint) + res = nil + httpClient := http.Client{ + CheckRedirect: func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse // Do not follow redirect + }, + } + resp, err = httpClient.Get(readinessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + // Check readiness path for individual check + readinessEndpoint = fmt.Sprint("http://", listener.Addr().String(), path.Join(defaultReadinessEndpoint, namedCheck)) + res = nil + resp, err = http.Get(readinessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + }) + + It("should serve liveness endpoint", func(ctx SpecContext) { + opts.HealthProbeBindAddress = ":0" + m, err := New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + res := fmt.Errorf("not alive") + namedCheck := "check" + err = m.AddHealthzCheck(namedCheck, func(_ *http.Request) error { return res }) + Expect(err).NotTo(HaveOccurred()) + + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + + livenessEndpoint := fmt.Sprint("http://", listener.Addr().String(), defaultLivenessEndpoint) + + // Controller is not ready + resp, err := 
http.Get(livenessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusInternalServerError)) + + // Controller is ready + res = nil + resp, err = http.Get(livenessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + // Check liveness path without trailing slash without redirect + livenessEndpoint = fmt.Sprint("http://", listener.Addr().String(), defaultLivenessEndpoint) + res = nil + httpClient := http.Client{ + CheckRedirect: func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse // Do not follow redirect + }, + } + resp, err = httpClient.Get(livenessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + // Check readiness path for individual check + livenessEndpoint = fmt.Sprint("http://", listener.Addr().String(), path.Join(defaultLivenessEndpoint, namedCheck)) + res = nil + resp, err = http.Get(livenessEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + }) + }) + + Context("should start serving pprof", func() { + var listener net.Listener + var opts Options + + BeforeEach(func() { + listener = nil + opts = Options{ + newPprofListener: func(addr string) (net.Listener, error) { + var err error + listener, err = defaultPprofListener(addr) + return listener, err + }, + } + }) + + AfterEach(func() { + if listener != nil { + listener.Close() + } + }) + + It("should stop serving pprof when stop is called", func(specCtx SpecContext) { + opts.PprofBindAddress = ":0" + m, err := New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + ctx, cancel := context.WithCancel(specCtx) + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + + // Check the pprof started + endpoint := fmt.Sprintf("http://%s", 
listener.Addr().String()) + _, err = http.Get(endpoint) + Expect(err).NotTo(HaveOccurred()) + + // Shutdown the server + cancel() + + // Expect the pprof server to shutdown + Eventually(func() error { + _, err = http.Get(endpoint) + return err + }, 10*time.Second).ShouldNot(Succeed()) + }) + + It("should serve pprof endpoints", func(ctx SpecContext) { + opts.PprofBindAddress = ":0" + m, err := New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + + pprofIndexEndpoint := fmt.Sprintf("http://%s/debug/pprof/", listener.Addr().String()) + resp, err := http.Get(pprofIndexEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + pprofCmdlineEndpoint := fmt.Sprintf("http://%s/debug/pprof/cmdline", listener.Addr().String()) + resp, err = http.Get(pprofCmdlineEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + pprofProfileEndpoint := fmt.Sprintf("http://%s/debug/pprof/profile", listener.Addr().String()) + resp, err = http.Get(pprofProfileEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + pprofSymbolEndpoint := fmt.Sprintf("http://%s/debug/pprof/symbol", listener.Addr().String()) + resp, err = http.Get(pprofSymbolEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + pprofTraceEndpoint := fmt.Sprintf("http://%s/debug/pprof/trace", listener.Addr().String()) + resp, err = http.Get(pprofTraceEndpoint) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(http.StatusOK)) }) }) Describe("Add", func() { It("should immediately start the Component if the Manager has already Started another Component", - func(done Done) { + func(ctx SpecContext) { m, 
err := New(cfg, Options{}) Expect(err).NotTo(HaveOccurred()) mgr, ok := m.(*controllerManager) @@ -440,34 +1719,35 @@ var _ = Describe("manger.Manager", func() { // Add one component before starting c1 := make(chan struct{}) - Expect(m.Add(RunnableFunc(func(s <-chan struct{}) error { - defer close(c1) + Expect(m.Add(RunnableFunc(func(context.Context) error { defer GinkgoRecover() + close(c1) return nil }))).To(Succeed()) go func() { defer GinkgoRecover() - Expect(m.Start(stop)).NotTo(HaveOccurred()) + Expect(m.Start(ctx)).NotTo(HaveOccurred()) }() + <-m.Elected() // Wait for the Manager to start - Eventually(func() bool { return mgr.started }).Should(BeTrue()) + Eventually(func() bool { + return mgr.runnables.Caches.Started() + }).Should(BeTrue()) // Add another component after starting c2 := make(chan struct{}) - Expect(m.Add(RunnableFunc(func(s <-chan struct{}) error { - defer close(c2) + Expect(m.Add(RunnableFunc(func(context.Context) error { defer GinkgoRecover() + close(c2) return nil }))).To(Succeed()) <-c1 <-c2 - - close(done) }) - It("should immediately start the Component if the Manager has already Started", func(done Done) { + It("should immediately start the Component if the Manager has already Started", func(ctx SpecContext) { m, err := New(cfg, Options{}) Expect(err).NotTo(HaveOccurred()) mgr, ok := m.(*controllerManager) @@ -475,118 +1755,155 @@ var _ = Describe("manger.Manager", func() { go func() { defer GinkgoRecover() - Expect(m.Start(stop)).NotTo(HaveOccurred()) + Expect(m.Start(ctx)).NotTo(HaveOccurred()) }() // Wait for the Manager to start - Eventually(func() bool { return mgr.started }).Should(BeTrue()) + Eventually(func() bool { + return mgr.runnables.Caches.Started() + }).Should(BeTrue()) c1 := make(chan struct{}) - Expect(m.Add(RunnableFunc(func(s <-chan struct{}) error { - defer close(c1) + Expect(m.Add(RunnableFunc(func(context.Context) error { defer GinkgoRecover() + close(c1) return nil }))).To(Succeed()) <-c1 - - close(done) }) - 
It("should fail if SetFields fails", func() { + It("should fail if attempted to start a second time", func(ctx SpecContext) { m, err := New(cfg, Options{}) Expect(err).NotTo(HaveOccurred()) - Expect(m.Add(&failRec{})).To(HaveOccurred()) + + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + // Wait for the Manager to start + Eventually(func() bool { + mgr, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + return mgr.runnables.Caches.Started() + }).Should(BeTrue()) + + err = m.Start(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("manager already started")) + }) }) - Describe("SetFields", func() { - It("should inject field values", func(done Done) { - m, err := New(cfg, Options{}) - Expect(err).NotTo(HaveOccurred()) - mgr, ok := m.(*controllerManager) - Expect(ok).To(BeTrue()) - mgr.cache = &informertest.FakeInformers{} + It("should not leak goroutines when stopped", func(specCtx SpecContext) { + currentGRs := goleak.IgnoreCurrent() - By("Injecting the dependencies") - err = m.SetFields(&injectable{ - scheme: func(scheme *runtime.Scheme) error { - defer GinkgoRecover() - Expect(scheme).To(Equal(m.GetScheme())) - return nil - }, - config: func(config *rest.Config) error { - defer GinkgoRecover() - Expect(config).To(Equal(m.GetConfig())) - return nil - }, - client: func(client client.Client) error { - defer GinkgoRecover() - Expect(client).To(Equal(m.GetClient())) - return nil - }, - cache: func(c cache.Cache) error { - defer GinkgoRecover() - Expect(c).To(Equal(m.GetCache())) - return nil - }, - stop: func(stop <-chan struct{}) error { - defer GinkgoRecover() - Expect(stop).NotTo(BeNil()) - return nil - }, - f: func(f inject.Func) error { - defer GinkgoRecover() - Expect(f).NotTo(BeNil()) - return nil - }, - }) + m, err := New(cfg, Options{}) + Expect(err).NotTo(HaveOccurred()) + + ctx, cancel := context.WithCancel(specCtx) + cancel() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + + // force-close 
keep-alive connections. These'll time anyway (after + // like 30s or so) but force it to speed up the tests. + clientTransport.CloseIdleConnections() + Eventually(func() error { return goleak.Find(currentGRs) }).Should(Succeed()) + }) + + It("should not leak goroutines if the default event broadcaster is used & events are emitted", func(specCtx SpecContext) { + currentGRs := goleak.IgnoreCurrent() + + m, err := New(cfg, Options{ /* implicit: default setting for EventBroadcaster */ }) + Expect(err).NotTo(HaveOccurred()) + + By("adding a runnable that emits an event") + ns := corev1.Namespace{} + ns.Name = "default" + + recorder := m.GetEventRecorderFor("rock-and-roll") + Expect(m.Add(RunnableFunc(func(_ context.Context) error { + recorder.Event(&ns, "Warning", "BallroomBlitz", "yeah, yeah, yeah-yeah-yeah") + return nil + }))).To(Succeed()) + + By("starting the manager & waiting till we've sent our event") + ctx, cancel := context.WithCancel(specCtx) + doneCh := make(chan struct{}) + go func() { + defer GinkgoRecover() + defer close(doneCh) + Expect(m.Start(ctx)).To(Succeed()) + }() + <-m.Elected() + + Eventually(func() *corev1.Event { + evts, err := clientset.CoreV1().Events("").SearchWithContext(ctx, m.GetScheme(), &ns) Expect(err).NotTo(HaveOccurred()) - By("Returning an error if dependency injection fails") + for i, evt := range evts.Items { + if evt.Reason == "BallroomBlitz" { + return &evts.Items[i] + } + } + return nil + }).ShouldNot(BeNil()) + + By("making sure there's no extra go routines still running after we stop") + cancel() + <-doneCh + + // force-close keep-alive connections. These'll time anyway (after + // like 30s or so) but force it to speed up the tests. 
+ clientTransport.CloseIdleConnections() + Eventually(func() error { return goleak.Find(currentGRs) }).Should(Succeed()) + }) - It("should not leak goroutines when a runnable returns error slowly after being signaled to stop", func(specCtx SpecContext) { + // This test reproduces the race condition where the manager's Start method + // exits due to context cancellation, leaving no one to drain errChan - err = m.SetFields(&injectable{ - client: func(client client.Client) error { - return expected - }, - }) - Expect(err).To(Equal(expected)) + currentGRs := goleak.IgnoreCurrent() - err = m.SetFields(&injectable{ - scheme: func(scheme *runtime.Scheme) error { - return expected - }, - }) - Expect(err).To(Equal(expected)) + // Create manager with a very short graceful shutdown timeout to reliably trigger the race condition + shortGracefulShutdownTimeout := 10 * time.Millisecond + m, err := New(cfg, Options{ + GracefulShutdownTimeout: &shortGracefulShutdownTimeout, + }) + Expect(err).NotTo(HaveOccurred()) - err = m.SetFields(&injectable{ - config: func(config *rest.Config) error { - return expected - }, - }) - Expect(err).To(Equal(expected)) + // Add the slow runnable that will return an error after some delay + for i := 0; i < 3; i++ { + slowRunnable := RunnableFunc(func(c context.Context) error { + <-c.Done() - err = m.SetFields(&injectable{ - cache: func(c cache.Cache) error { - return expected - }, - }) - Expect(err).To(Equal(expected)) + // Simulate some work that delays the error from being returned + // Choosing a large delay to reliably trigger the race condition + time.Sleep(100 * time.Millisecond) + + // This simulates the race condition where runnables try to send + // errors after the manager has 
stopped reading from errChan + return errors.New("slow runnable error") }) - Expect(err).To(Equal(expected)) - close(done) - }) + + Expect(m.Add(slowRunnable)).To(Succeed()) + } + + ctx, cancel := context.WithTimeout(specCtx, 50*time.Millisecond) + defer cancel() + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).To(HaveOccurred()) // We expect error here because the slow runnables will return errors + }() + + // Wait for context to be cancelled + <-ctx.Done() + + // Give time for any leaks to become apparent. This makes sure that we don't false alarm on go routine leaks because runnables are still running. + time.Sleep(300 * time.Millisecond) + + // force-close keep-alive connections + clientTransport.CloseIdleConnections() + Eventually(func() error { return goleak.Find(currentGRs) }).Should(Succeed()) }) It("should provide a function to get the Config", func() { @@ -594,7 +1911,7 @@ var _ = Describe("manger.Manager", func() { Expect(err).NotTo(HaveOccurred()) mgr, ok := m.(*controllerManager) Expect(ok).To(BeTrue()) - Expect(m.GetConfig()).To(Equal(mgr.config)) + Expect(m.GetConfig()).To(Equal(mgr.cluster.GetConfig())) }) It("should provide a function to get the Client", func() { @@ -602,7 +1919,7 @@ var _ = Describe("manger.Manager", func() { Expect(err).NotTo(HaveOccurred()) mgr, ok := m.(*controllerManager) Expect(ok).To(BeTrue()) - Expect(m.GetClient()).To(Equal(mgr.client)) + Expect(m.GetClient()).To(Equal(mgr.cluster.GetClient())) }) It("should provide a function to get the Scheme", func() { @@ -610,7 +1927,7 @@ var _ = Describe("manger.Manager", func() { Expect(err).NotTo(HaveOccurred()) mgr, ok := m.(*controllerManager) Expect(ok).To(BeTrue()) - Expect(m.GetScheme()).To(Equal(mgr.scheme)) + Expect(m.GetScheme()).To(Equal(mgr.cluster.GetScheme())) }) It("should provide a function to get the FieldIndexer", func() { @@ -618,7 +1935,7 @@ var _ = Describe("manger.Manager", func() { Expect(err).NotTo(HaveOccurred()) mgr, ok := m.(*controllerManager) 
Expect(ok).To(BeTrue()) - Expect(m.GetFieldIndexer()).To(Equal(mgr.fieldIndexes)) + Expect(m.GetFieldIndexer()).To(Equal(mgr.cluster.GetFieldIndexer())) }) It("should provide a function to get the EventRecorder", func() { @@ -631,83 +1948,156 @@ var _ = Describe("manger.Manager", func() { Expect(err).NotTo(HaveOccurred()) Expect(m.GetAPIReader()).NotTo(BeNil()) }) + + It("should run warmup runnables before leader election is won", func(ctx SpecContext) { + By("Creating a channel to track execution order") + runnableExecutionOrderChan := make(chan string, 2) + const leaderElectionRunnableName = "leaderElectionRunnable" + const warmupRunnableName = "warmupRunnable" + + By("Creating a manager with leader election enabled") + m, err := New(cfg, Options{ + LeaderElection: true, + LeaderElectionNamespace: "default", + LeaderElectionID: "test-leader-election-warmup", + newResourceLock: fakeleaderelection.NewResourceLock, + HealthProbeBindAddress: "0", + Metrics: metricsserver.Options{BindAddress: "0"}, + PprofBindAddress: "0", + }) + Expect(err).NotTo(HaveOccurred()) + + By("Creating a runnable that implements WarmupRunnable interface") + // Create a warmup runnable + warmupRunnable := newWarmupRunnableFunc( + func(ctx context.Context) error { + // This is the leader election runnable that will be executed after leader election + // It will block until context is done/cancelled + <-ctx.Done() + return nil + }, + func(ctx context.Context) error { + // This should be called during startup before leader election + runnableExecutionOrderChan <- warmupRunnableName + return nil + }, + ) + Expect(m.Add(warmupRunnable)).To(Succeed()) + + By("Creating a runnable that requires leader election") + leaderElectionRunnable := RunnableFunc( + func(ctx context.Context) error { + runnableExecutionOrderChan <- leaderElectionRunnableName + <-ctx.Done() + return nil + }, + ) + Expect(m.Add(leaderElectionRunnable)).To(Succeed()) + + cm, ok := m.(*controllerManager) + Expect(ok).To(BeTrue()) + 
resourceLockWithHooks, ok := cm.resourceLock.(fakeleaderelection.ControllableResourceLockInterface) + Expect(ok).To(BeTrue()) + + By("Blocking leader election") + resourceLockWithHooks.BlockLeaderElection() + + By("Starting the manager") + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).To(Succeed()) + }() + + By("Waiting for the warmup runnable to be executed without leader election being won") + Expect(<-runnableExecutionOrderChan).To(Equal(warmupRunnableName)) + + By("Unblocking leader election") + resourceLockWithHooks.UnblockLeaderElection() + + By("Waiting for the leader election runnable to be executed after leader election was won") + <-m.Elected() + Expect(<-runnableExecutionOrderChan).To(Equal(leaderElectionRunnableName)) + }) }) -var _ reconcile.Reconciler = &failRec{} -var _ inject.Client = &failRec{} +type runnableError struct { +} + +func (runnableError) Error() string { + return "not feeling like that" +} -type failRec struct{} +var _ Runnable = &cacheProvider{} -func (*failRec) Reconcile(reconcile.Request) (reconcile.Result, error) { - return reconcile.Result{}, nil +type cacheProvider struct { + cache cache.Cache } -func (*failRec) Start(<-chan struct{}) error { - return nil +func (c *cacheProvider) GetCache() cache.Cache { + return c.cache } -func (*failRec) InjectClient(client.Client) error { - return fmt.Errorf("expected error") +func (c *cacheProvider) Start(ctx context.Context) error { + return c.cache.Start(ctx) } -var _ inject.Injector = &injectable{} -var _ inject.Cache = &injectable{} -var _ inject.Client = &injectable{} -var _ inject.Scheme = &injectable{} -var _ inject.Config = &injectable{} -var _ inject.Stoppable = &injectable{} - -type injectable struct { - scheme func(scheme *runtime.Scheme) error - client func(client.Client) error - config func(config *rest.Config) error - cache func(cache.Cache) error - f func(inject.Func) error - stop func(<-chan struct{}) error +type startSignalingInformer struct { + mu sync.Mutex + + 
// The manager calls Start and WaitForCacheSync in + // parallel, so we have to protect wasStarted with a Mutex + // and block in WaitForCacheSync until it is true. + wasStarted bool + // was synced will be true once Start was called and + // WaitForCacheSync returned, just like a real cache. + wasSynced bool + cache.Cache } -func (i *injectable) InjectCache(c cache.Cache) error { - if i.cache == nil { - return nil - } - return i.cache(c) +func (c *startSignalingInformer) Start(ctx context.Context) error { + c.mu.Lock() + c.wasStarted = true + c.mu.Unlock() + return c.Cache.Start(ctx) } -func (i *injectable) InjectConfig(config *rest.Config) error { - if i.config == nil { - return nil - } - return i.config(config) +func (c *startSignalingInformer) WaitForCacheSync(ctx context.Context) bool { + defer func() { + c.mu.Lock() + c.wasSynced = true + c.mu.Unlock() + }() + return c.Cache.WaitForCacheSync(ctx) } -func (i *injectable) InjectClient(c client.Client) error { - if i.client == nil { - return nil - } - return i.client(c) +type startClusterAfterManager struct { + informer *startSignalingInformer } -func (i *injectable) InjectScheme(scheme *runtime.Scheme) error { - if i.scheme == nil { - return nil - } - return i.scheme(scheme) +func (c *startClusterAfterManager) Start(ctx context.Context) error { + return c.informer.Start(ctx) } -func (i *injectable) InjectFunc(f inject.Func) error { - if i.f == nil { - return nil - } - return i.f(f) +func (c *startClusterAfterManager) GetCache() cache.Cache { + return c.informer } -func (i *injectable) InjectStopChannel(stop <-chan struct{}) error { - if i.stop == nil { - return nil - } - return i.stop(stop) +// metricsDefaultServer is used to type check the default metrics server implementation +// so we can retrieve the bind addr without having to make GetBindAddr a function on the +// metricsserver.Server interface or resort to reflection. 
+type metricsDefaultServer interface { + GetBindAddr() string } -func (i *injectable) Start(<-chan struct{}) error { +type needElection struct { + ch chan struct{} +} + +func (n *needElection) Start(_ context.Context) error { + n.ch <- struct{}{} return nil } + +func (n *needElection) NeedLeaderElection() bool { + return true +} diff --git a/pkg/manager/runnable_group.go b/pkg/manager/runnable_group.go new file mode 100644 index 0000000000..53e29fc56f --- /dev/null +++ b/pkg/manager/runnable_group.go @@ -0,0 +1,371 @@ +package manager + +import ( + "context" + "errors" + "sync" + + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +var ( + errRunnableGroupStopped = errors.New("can't accept new runnable as stop procedure is already engaged") +) + +// readyRunnable encapsulates a runnable with +// a ready check. +type readyRunnable struct { + Runnable + Check runnableCheck + signalReady bool +} + +// runnableCheck can be passed to Add() to let the runnable group determine that a +// runnable is ready. A runnable check should block until a runnable is ready, +// if the returned result is false, the runnable is considered not ready and failed. +type runnableCheck func(ctx context.Context) bool + +// runnables handles all the runnables for a manager by grouping them accordingly to their +// type (webhooks, caches etc.). +type runnables struct { + HTTPServers *runnableGroup + Webhooks *runnableGroup + Caches *runnableGroup + LeaderElection *runnableGroup + Warmup *runnableGroup + Others *runnableGroup +} + +// newRunnables creates a new runnables object. 
+func newRunnables(baseContext BaseContextFunc, errChan chan error) *runnables { + return &runnables{ + HTTPServers: newRunnableGroup(baseContext, errChan), + Webhooks: newRunnableGroup(baseContext, errChan), + Caches: newRunnableGroup(baseContext, errChan), + LeaderElection: newRunnableGroup(baseContext, errChan), + Warmup: newRunnableGroup(baseContext, errChan), + Others: newRunnableGroup(baseContext, errChan), + } +} + +// withLogger returns the runnables with the logger set for all runnable groups. +func (r *runnables) withLogger(logger logr.Logger) *runnables { + r.HTTPServers.withLogger(logger) + r.Webhooks.withLogger(logger) + r.Caches.withLogger(logger) + r.LeaderElection.withLogger(logger) + r.Others.withLogger(logger) + return r +} + +// Add adds a runnable to closest group of runnable that they belong to. +// +// Add should be able to be called before and after Start, but not after StopAndWait. +// Add should return an error when called during StopAndWait. +// The runnables added before Start are started when Start is called. +// The runnables added after Start are started directly. 
+func (r *runnables) Add(fn Runnable) error { + switch runnable := fn.(type) { + case *Server: + if runnable.NeedLeaderElection() { + return r.LeaderElection.Add(fn, nil) + } + return r.HTTPServers.Add(fn, nil) + case hasCache: + return r.Caches.Add(fn, func(ctx context.Context) bool { + return runnable.GetCache().WaitForCacheSync(ctx) + }) + case webhook.Server: + return r.Webhooks.Add(fn, nil) + case warmupRunnable, LeaderElectionRunnable: + if warmupRunnable, ok := fn.(warmupRunnable); ok { + if err := r.Warmup.Add(RunnableFunc(warmupRunnable.Warmup), nil); err != nil { + return err + } + } + + leaderElectionRunnable, ok := fn.(LeaderElectionRunnable) + if !ok { + // If the runnable is not a LeaderElectionRunnable, add it to the leader election group for backwards compatibility + return r.LeaderElection.Add(fn, nil) + } + + if !leaderElectionRunnable.NeedLeaderElection() { + return r.Others.Add(fn, nil) + } + return r.LeaderElection.Add(fn, nil) + default: + return r.LeaderElection.Add(fn, nil) + } +} + +// runnableGroup manages a group of runnables that are +// meant to be running together until StopAndWait is called. +// +// Runnables can be added to a group after the group has started +// but not after it's stopped or while shutting down. +type runnableGroup struct { + ctx context.Context + cancel context.CancelFunc + + start sync.Mutex + startOnce sync.Once + started bool + startQueue []*readyRunnable + startReadyCh chan *readyRunnable + + stop sync.RWMutex + stopOnce sync.Once + stopped bool + + // errChan is the error channel passed by the caller + // when the group is created. + // All errors are forwarded to this channel once they occur. + errChan chan error + + // ch is the internal channel where the runnables are read off from. + ch chan *readyRunnable + + // wg is an internal sync.WaitGroup that allows us to properly stop + // and wait for all the runnables to finish before returning. 
+ wg *sync.WaitGroup + + // logger is used for logging when errors are dropped during shutdown + logger logr.Logger +} + +func newRunnableGroup(baseContext BaseContextFunc, errChan chan error) *runnableGroup { + r := &runnableGroup{ + startReadyCh: make(chan *readyRunnable), + errChan: errChan, + ch: make(chan *readyRunnable), + wg: new(sync.WaitGroup), + logger: logr.Discard(), // Default to no-op logger + } + + r.ctx, r.cancel = context.WithCancel(baseContext()) + return r +} + +// withLogger sets the logger for this runnable group. +func (r *runnableGroup) withLogger(logger logr.Logger) { + r.logger = logger +} + +// Started returns true if the group has started. +func (r *runnableGroup) Started() bool { + r.start.Lock() + defer r.start.Unlock() + return r.started +} + +// Start starts the group and waits for all +// initially registered runnables to start. +// It can only be called once, subsequent calls have no effect. +func (r *runnableGroup) Start(ctx context.Context) error { + var retErr error + + r.startOnce.Do(func() { + defer close(r.startReadyCh) + + // Start the internal reconciler. + go r.reconcile() + + // Start the group and queue up all + // the runnables that were added prior. + r.start.Lock() + r.started = true + for _, rn := range r.startQueue { + rn.signalReady = true + r.ch <- rn + } + r.start.Unlock() + + // If we don't have any queue, return. + if len(r.startQueue) == 0 { + return + } + + // Wait for all runnables to signal. + for { + select { + case <-ctx.Done(): + if err := ctx.Err(); !errors.Is(err, context.Canceled) { + retErr = err + } + case rn := <-r.startReadyCh: + for i, existing := range r.startQueue { + if existing == rn { + // Remove the item from the start queue. + r.startQueue = append(r.startQueue[:i], r.startQueue[i+1:]...) + break + } + } + // We're done waiting if the queue is empty, return. 
+ if len(r.startQueue) == 0 { + return + } + } + } + }) + + return retErr +} + +// reconcile is our main entrypoint for every runnable added +// to this group. Its primary job is to read off the internal channel +// and schedule runnables while tracking their state. +func (r *runnableGroup) reconcile() { + for runnable := range r.ch { + // Handle stop. + // If the shutdown has been called we want to avoid + // adding new goroutines to the WaitGroup because Wait() + // panics if Add() is called after it. + { + r.stop.RLock() + if r.stopped { + // Drop any runnables if we're stopped. + r.errChan <- errRunnableGroupStopped + r.stop.RUnlock() + continue + } + + // Why is this here? + // When StopAndWait is called, if a runnable is in the process + // of being added, we could end up in a situation where + // the WaitGroup is incremented while StopAndWait has called Wait(), + // which would result in a panic. + r.wg.Add(1) + r.stop.RUnlock() + } + + // Start the runnable. + go func(rn *readyRunnable) { + go func() { + if rn.Check(r.ctx) { + if rn.signalReady { + r.startReadyCh <- rn + } + } + }() + + // If we return, the runnable ended cleanly + // or returned an error to the channel. + // + // We should always decrement the WaitGroup here. + defer r.wg.Done() + + // Start the runnable. + if err := rn.Start(r.ctx); err != nil { + // Check if we're during the shutdown process. 
+ r.stop.RLock() + isStopped := r.stopped + r.stop.RUnlock() + + if isStopped { + // During shutdown, try to send error first (error drain goroutine might still be running) + // but drop if it would block to prevent goroutine leaks + select { + case r.errChan <- err: + // Error sent successfully (error drain goroutine is still running) + default: + // Error drain goroutine has exited, drop error to prevent goroutine leak + if !errors.Is(err, context.Canceled) { // don't log context.Canceled errors as they are expected during shutdown + r.logger.Info("error dropped during shutdown to prevent goroutine leak", "error", err) + } + } + } else { + // During normal operation, always try to send errors (may block briefly) + r.errChan <- err + } + } + }(runnable) + } +} + +// Add should be able to be called before and after Start, but not after StopAndWait. +// Add should return an error when called during StopAndWait. +func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error { + r.stop.RLock() + if r.stopped { + r.stop.RUnlock() + return errRunnableGroupStopped + } + r.stop.RUnlock() + + if ready == nil { + ready = func(_ context.Context) bool { return true } + } + + readyRunnable := &readyRunnable{ + Runnable: rn, + Check: ready, + } + + // Handle start. + // If the overall runnable group isn't started yet + // we want to buffer the runnables and let Start() + // queue them up again later. + { + r.start.Lock() + + // Check if we're already started. + if !r.started { + // Store the runnable in the internal if not. + r.startQueue = append(r.startQueue, readyRunnable) + r.start.Unlock() + return nil + } + r.start.Unlock() + } + + // Recheck if we're stopped and hold the readlock, given that the stop and start can be called + // at the same time, we can end up in a situation where the runnable is added + // after the group is stopped and the channel is closed. 
+ r.stop.RLock() + defer r.stop.RUnlock() + if r.stopped { + return errRunnableGroupStopped + } + + // Enqueue the runnable. + r.ch <- readyRunnable + return nil +} + +// StopAndWait waits for all the runnables to finish before returning. +func (r *runnableGroup) StopAndWait(ctx context.Context) { + r.stopOnce.Do(func() { + // Close the reconciler channel once we're done. + defer func() { + r.stop.Lock() + close(r.ch) + r.stop.Unlock() + }() + + _ = r.Start(ctx) + r.stop.Lock() + // Store the stopped variable so we don't accept any new + // runnables for the time being. + r.stopped = true + r.stop.Unlock() + + // Cancel the internal channel. + r.cancel() + + done := make(chan struct{}) + go func() { + defer close(done) + // Wait for all the runnables to finish. + r.wg.Wait() + }() + + select { + case <-done: + // We're done, exit. + case <-ctx.Done(): + // Calling context has expired, exit. + } + }) +} diff --git a/pkg/manager/runnable_group_test.go b/pkg/manager/runnable_group_test.go new file mode 100644 index 0000000000..6f9b879e0e --- /dev/null +++ b/pkg/manager/runnable_group_test.go @@ -0,0 +1,386 @@ +package manager + +import ( + "context" + "errors" + "fmt" + "sync/atomic" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/utils/ptr" + + "sigs.k8s.io/controller-runtime/pkg/cache/informertest" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +var _ = Describe("runnables", func() { + errCh := make(chan error) + + It("should be able to create a new runnables object", func() { + Expect(newRunnables(defaultBaseContext, errCh)).ToNot(BeNil()) + }) + + It("should add HTTP servers to the appropriate group", func() { + server := &Server{} + r := newRunnables(defaultBaseContext, errCh) + Expect(r.Add(server)).To(Succeed()) + Expect(r.HTTPServers.startQueue).To(HaveLen(1)) + Expect(r.Others.startQueue).To(BeEmpty()) + }) + + It("should add caches to the appropriate group", func() { + cache := &cacheProvider{cache: &informertest.FakeInformers{Error: fmt.Errorf("expected error")}} + r := newRunnables(defaultBaseContext, errCh) + Expect(r.Add(cache)).To(Succeed()) + Expect(r.Caches.startQueue).To(HaveLen(1)) + Expect(r.Others.startQueue).To(BeEmpty()) + }) + + It("should add webhooks to the appropriate group", func() { + webhook := webhook.NewServer(webhook.Options{}) + r := newRunnables(defaultBaseContext, errCh) + Expect(r.Add(webhook)).To(Succeed()) + Expect(r.Webhooks.startQueue).To(HaveLen(1)) + Expect(r.Others.startQueue).To(BeEmpty()) + }) + + It("should add any runnable to the leader election group", func() { + err := errors.New("runnable func") + runnable := RunnableFunc(func(c context.Context) error { + return err + }) + + r := newRunnables(defaultBaseContext, errCh) + Expect(r.Add(runnable)).To(Succeed()) + Expect(r.LeaderElection.startQueue).To(HaveLen(1)) + Expect(r.Others.startQueue).To(BeEmpty()) + }) + + It("should add WarmupRunnable to the Warmup and LeaderElection group", func() { + warmupRunnable := newWarmupRunnableFunc( + func(c context.Context) error { + <-c.Done() + return nil + }, + func(c context.Context) error { return nil }, + ) + + r := newRunnables(defaultBaseContext, errCh) + 
Expect(r.Add(warmupRunnable)).To(Succeed()) + Expect(r.Warmup.startQueue).To(HaveLen(1)) + Expect(r.LeaderElection.startQueue).To(HaveLen(1)) + Expect(r.Others.startQueue).To(BeEmpty()) + }) + + It("should add WarmupRunnable that doesn't needs leader election to warmup group only", func() { + warmupRunnable := newLeaderElectionAndWarmupRunnable( + func(c context.Context) error { + <-c.Done() + return nil + }, + func(c context.Context) error { return nil }, + false, + ) + + r := newRunnables(defaultBaseContext, errCh) + Expect(r.Add(warmupRunnable)).To(Succeed()) + + Expect(r.Warmup.startQueue).To(HaveLen(1)) + Expect(r.LeaderElection.startQueue).To(BeEmpty()) + Expect(r.Others.startQueue).To(HaveLen(1)) + }) + + It("should add WarmupRunnable that needs leader election to Warmup and LeaderElection group, not Others", func() { + warmupRunnable := newLeaderElectionAndWarmupRunnable( + func(c context.Context) error { + <-c.Done() + return nil + }, + func(c context.Context) error { return nil }, + true, + ) + + r := newRunnables(defaultBaseContext, errCh) + Expect(r.Add(warmupRunnable)).To(Succeed()) + + Expect(r.Warmup.startQueue).To(HaveLen(1)) + Expect(r.LeaderElection.startQueue).To(HaveLen(1)) + Expect(r.Others.startQueue).To(BeEmpty()) + }) + + It("should execute the Warmup function when Warmup group is started", func(ctx SpecContext) { + var warmupExecuted atomic.Bool + + warmupRunnable := newWarmupRunnableFunc( + func(c context.Context) error { + <-c.Done() + return nil + }, + func(c context.Context) error { + warmupExecuted.Store(true) + return nil + }, + ) + + r := newRunnables(defaultBaseContext, errCh) + Expect(r.Add(warmupRunnable)).To(Succeed()) + + // Start the Warmup group + Expect(r.Warmup.Start(ctx)).To(Succeed()) + + // Verify warmup function was called + Expect(warmupExecuted.Load()).To(BeTrue()) + }) + + It("should propagate errors from Warmup function to error channel", func(ctx SpecContext) { + expectedErr := fmt.Errorf("expected warmup error") + 
+ warmupRunnable := newWarmupRunnableFunc( + func(c context.Context) error { + <-c.Done() + return nil + }, + func(c context.Context) error { return expectedErr }, + ) + + testErrChan := make(chan error, 1) + r := newRunnables(defaultBaseContext, testErrChan) + Expect(r.Add(warmupRunnable)).To(Succeed()) + + // Start the Warmup group in a goroutine + go func() { + Expect(r.Warmup.Start(ctx)).To(Succeed()) + }() + + // Error from Warmup should be sent to error channel + var receivedErr error + Eventually(func() error { + select { + case receivedErr = <-testErrChan: + return receivedErr + default: + return nil + } + }).Should(Equal(expectedErr)) + }) +}) + +var _ = Describe("runnableGroup", func() { + errCh := make(chan error) + + It("should be able to add new runnables before it starts", func(ctx SpecContext) { + rg := newRunnableGroup(defaultBaseContext, errCh) + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + <-ctx.Done() + return nil + }), nil)).To(Succeed()) + + Expect(rg.Started()).To(BeFalse()) + }) + + It("should be able to add new runnables before and after start", func(ctx SpecContext) { + rg := newRunnableGroup(defaultBaseContext, errCh) + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + <-ctx.Done() + return nil + }), nil)).To(Succeed()) + Expect(rg.Start(ctx)).To(Succeed()) + Expect(rg.Started()).To(BeTrue()) + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + <-ctx.Done() + return nil + }), nil)).To(Succeed()) + }) + + It("should be able to add new runnables before and after start concurrently", func(ctx SpecContext) { + rg := newRunnableGroup(defaultBaseContext, errCh) + + go func() { + defer GinkgoRecover() + <-time.After(50 * time.Millisecond) + Expect(rg.Start(ctx)).To(Succeed()) + }() + + for i := 0; i < 20; i++ { + go func(i int) { + defer GinkgoRecover() + + <-time.After(time.Duration(i) * 10 * time.Millisecond) + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + <-ctx.Done() + return nil + }), 
nil)).To(Succeed()) + }(i) + } + }) + + It("should be able to close the group and wait for all runnables to finish", func(specCtx SpecContext) { + ctx, cancel := context.WithCancel(specCtx) + + exited := ptr.To(int64(0)) + rg := newRunnableGroup(defaultBaseContext, errCh) + for i := 0; i < 10; i++ { + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + defer atomic.AddInt64(exited, 1) + <-ctx.Done() + <-time.After(time.Duration(i) * 10 * time.Millisecond) + return nil + }), nil)).To(Succeed()) + } + Expect(rg.Start(ctx)).To(Succeed()) + + // Cancel the context, asking the runnables to exit. + cancel() + rg.StopAndWait(specCtx) + + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + return nil + }), nil)).ToNot(Succeed()) + + Expect(atomic.LoadInt64(exited)).To(BeNumerically("==", 10)) + }) + + It("should be able to wait for all runnables to be ready at different intervals", func(specCtx SpecContext) { + ctx, cancel := context.WithTimeout(specCtx, 1*time.Second) + defer cancel() + rg := newRunnableGroup(defaultBaseContext, errCh) + + go func() { + defer GinkgoRecover() + <-time.After(50 * time.Millisecond) + Expect(rg.Start(ctx)).To(Succeed()) + }() + + for i := 0; i < 20; i++ { + go func(i int) { + defer GinkgoRecover() + + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + <-ctx.Done() + return nil + }), func(_ context.Context) bool { + <-time.After(time.Duration(i) * 10 * time.Millisecond) + return true + })).To(Succeed()) + }(i) + } + }) + + It("should be able to handle adding runnables while stopping", func(specCtx SpecContext) { + ctx, cancel := context.WithTimeout(specCtx, 10*time.Second) + defer cancel() + rg := newRunnableGroup(defaultBaseContext, errCh) + + go func() { + defer GinkgoRecover() + <-time.After(1 * time.Millisecond) + Expect(rg.Start(ctx)).To(Succeed()) + }() + go func() { + defer GinkgoRecover() + <-time.After(1 * time.Millisecond) + ctx, cancel := context.WithCancel(ctx) + cancel() + rg.StopAndWait(ctx) + }() + + 
for i := 0; i < 200; i++ { + go func(i int) { + defer GinkgoRecover() + + <-time.After(time.Duration(i) * time.Microsecond) + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + <-ctx.Done() + return nil + }), func(_ context.Context) bool { + return true + })).To(SatisfyAny( + Succeed(), + Equal(errRunnableGroupStopped), + )) + }(i) + } + }) + + It("should not turn ready if some readiness check fail", func(specCtx SpecContext) { + ctx, cancel := context.WithTimeout(specCtx, 2*time.Second) + defer cancel() + rg := newRunnableGroup(defaultBaseContext, errCh) + + go func() { + defer GinkgoRecover() + <-time.After(50 * time.Millisecond) + Expect(rg.Start(ctx)).To(Succeed()) + }() + + for i := 0; i < 20; i++ { + go func(i int) { + defer GinkgoRecover() + + Expect(rg.Add(RunnableFunc(func(c context.Context) error { + <-ctx.Done() + return nil + }), func(_ context.Context) bool { + <-time.After(time.Duration(i) * 10 * time.Millisecond) + return i%2 == 0 // Return false readiness all uneven indexes. + })).To(Succeed()) + }(i) + } + }) +}) + +var _ warmupRunnable = &warmupRunnableFunc{} + +func newWarmupRunnableFunc( + startFunc func(context.Context) error, + warmupFunc func(context.Context) error, +) *warmupRunnableFunc { + return &warmupRunnableFunc{ + startFunc: startFunc, + warmupFunc: warmupFunc, + } +} + +// warmupRunnableFunc is a helper struct that implements WarmupRunnable +// for testing purposes. 
+type warmupRunnableFunc struct { + startFunc func(context.Context) error + warmupFunc func(context.Context) error +} + +func (r *warmupRunnableFunc) Start(ctx context.Context) error { + return r.startFunc(ctx) +} + +func (r *warmupRunnableFunc) Warmup(ctx context.Context) error { + return r.warmupFunc(ctx) +} + +var _ LeaderElectionRunnable = &leaderElectionAndWarmupRunnable{} +var _ warmupRunnable = &leaderElectionAndWarmupRunnable{} + +// leaderElectionAndWarmupRunnable implements both WarmupRunnable and LeaderElectionRunnable +type leaderElectionAndWarmupRunnable struct { + *warmupRunnableFunc + needLeaderElection bool +} + +func newLeaderElectionAndWarmupRunnable( + startFunc func(context.Context) error, + warmupFunc func(context.Context) error, + needLeaderElection bool, +) *leaderElectionAndWarmupRunnable { + return &leaderElectionAndWarmupRunnable{ + warmupRunnableFunc: &warmupRunnableFunc{ + startFunc: startFunc, + warmupFunc: warmupFunc, + }, + needLeaderElection: needLeaderElection, + } +} + +func (r leaderElectionAndWarmupRunnable) NeedLeaderElection() bool { + return r.needLeaderElection +} diff --git a/pkg/manager/server.go b/pkg/manager/server.go new file mode 100644 index 0000000000..1983165da8 --- /dev/null +++ b/pkg/manager/server.go @@ -0,0 +1,109 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package manager + +import ( + "context" + "errors" + "net" + "net/http" + "time" + + crlog "sigs.k8s.io/controller-runtime/pkg/log" +) + +var ( + _ Runnable = (*Server)(nil) + _ LeaderElectionRunnable = (*Server)(nil) +) + +// Server is a general purpose HTTP server Runnable for a manager. +// It is used to serve some internal handlers for health probes and profiling, +// but it can also be used to run custom servers. +type Server struct { + // Name is an optional string that describes the purpose of the server. It is used in logs to distinguish + // among multiple servers. + Name string + + // Server is the HTTP server to run. It is required. + Server *http.Server + + // Listener is an optional listener to use. If not set, the server start a listener using the server.Addr. + // Using a listener is useful when the port reservation needs to happen in advance of this runnable starting. + Listener net.Listener + + // OnlyServeWhenLeader is an optional bool that indicates that the server should only be started when the manager is the leader. + OnlyServeWhenLeader bool + + // ShutdownTimeout is an optional duration that indicates how long to wait for the server to shutdown gracefully. If not set, + // the server will wait indefinitely for all connections to close. + ShutdownTimeout *time.Duration +} + +// Start starts the server. It will block until the server is stopped or an error occurs. 
+func (s *Server) Start(ctx context.Context) error { + log := crlog.FromContext(ctx) + if s.Name != "" { + log = log.WithValues("name", s.Name) + } + log = log.WithValues("addr", s.addr()) + + serverShutdown := make(chan struct{}) + go func() { + <-ctx.Done() + log.Info("shutting down server") + + shutdownCtx := context.Background() + if s.ShutdownTimeout != nil { + var shutdownCancel context.CancelFunc + shutdownCtx, shutdownCancel = context.WithTimeout(shutdownCtx, *s.ShutdownTimeout) + defer shutdownCancel() + } + + if err := s.Server.Shutdown(shutdownCtx); err != nil { + log.Error(err, "error shutting down server") + } + close(serverShutdown) + }() + + log.Info("starting server") + if err := s.serve(); err != nil && !errors.Is(err, http.ErrServerClosed) { + return err + } + + <-serverShutdown + return nil +} + +// NeedLeaderElection returns true if the server should only be started when the manager is the leader. +func (s *Server) NeedLeaderElection() bool { + return s.OnlyServeWhenLeader +} + +func (s *Server) addr() string { + if s.Listener != nil { + return s.Listener.Addr().String() + } + return s.Server.Addr +} + +func (s *Server) serve() error { + if s.Listener != nil { + return s.Server.Serve(s.Listener) + } + return s.Server.ListenAndServe() +} diff --git a/pkg/manager/signals/signal.go b/pkg/manager/signals/signal.go index 08eaef7b42..a79cfb42df 100644 --- a/pkg/manager/signals/signal.go +++ b/pkg/manager/signals/signal.go @@ -17,27 +17,29 @@ limitations under the License. package signals import ( + "context" "os" "os/signal" ) var onlyOneSignalHandler = make(chan struct{}) -// SetupSignalHandler registers for SIGTERM and SIGINT. A stop channel is returned -// which is closed on one of these signals. If a second signal is caught, the program +// SetupSignalHandler registers for SIGTERM and SIGINT. A context is returned +// which is canceled on one of these signals. If a second signal is caught, the program // is terminated with exit code 1. 
-func SetupSignalHandler() (stopCh <-chan struct{}) { +func SetupSignalHandler() context.Context { close(onlyOneSignalHandler) // panics when called twice - stop := make(chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) + c := make(chan os.Signal, 2) signal.Notify(c, shutdownSignals...) go func() { <-c - close(stop) + cancel() <-c os.Exit(1) // second signal. Exit directly. }() - return stop + return ctx } diff --git a/pkg/manager/signals/signal_posix.go b/pkg/manager/signals/signal_posix.go index 9bdb4e7418..a0f00a7321 100644 --- a/pkg/manager/signals/signal_posix.go +++ b/pkg/manager/signals/signal_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/pkg/manager/signals/signal_test.go b/pkg/manager/signals/signal_test.go index a2477da69c..134937e012 100644 --- a/pkg/manager/signals/signal_test.go +++ b/pkg/manager/signals/signal_test.go @@ -23,7 +23,7 @@ import ( "sync" "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -32,11 +32,11 @@ var _ = Describe("runtime signal", func() { Context("SignalHandler Test", func() { It("test signal handler", func() { - stop := SetupSignalHandler() + ctx := SetupSignalHandler() task := &Task{ ticker: time.NewTicker(time.Second * 2), } - c := make(chan os.Signal) + c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) task.wg.Add(1) go func(c chan os.Signal) { @@ -47,7 +47,7 @@ var _ = Describe("runtime signal", func() { select { case sig := <-c: fmt.Printf("Got %s signal. Aborting...\n", sig) - case _, ok := <-stop: + case _, ok := <-ctx.Done(): Expect(ok).To(BeFalse()) } }) diff --git a/pkg/manager/signals/signals_suite_test.go b/pkg/manager/signals/signals_suite_test.go index 2a4f32d751..bae6d72ed5 100644 --- a/pkg/manager/signals/signals_suite_test.go +++ b/pkg/manager/signals/signals_suite_test.go @@ -20,14 +20,13 @@ import ( "os/signal" "testing" - . 
"github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/envtest" ) func TestSource(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Runtime Signal Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Runtime Signal Suite") } var _ = BeforeSuite(func() { diff --git a/pkg/manager/testdata/custom-config.yaml b/pkg/manager/testdata/custom-config.yaml new file mode 100644 index 0000000000..a15c9f8e5c --- /dev/null +++ b/pkg/manager/testdata/custom-config.yaml @@ -0,0 +1,3 @@ +apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 +kind: CustomControllerManagerConfiguration +customValue: foo diff --git a/pkg/metrics/client_go_adapter.go b/pkg/metrics/client_go_adapter.go index 3b2c316280..ff28998c44 100644 --- a/pkg/metrics/client_go_adapter.go +++ b/pkg/metrics/client_go_adapter.go @@ -17,26 +17,10 @@ limitations under the License. package metrics import ( - "net/url" - "time" - - "k8s.io/apimachinery/pkg/util/runtime" + "context" "github.com/prometheus/client_golang/prometheus" - reflectormetrics "k8s.io/client-go/tools/cache" clientmetrics "k8s.io/client-go/tools/metrics" - workqueuemetrics "k8s.io/client-go/util/workqueue" -) - -const ( - workQueueSubsystem = "workqueue" - depthKey = "depth" - addsKey = "adds_total" - queueLatencyKey = "queue_duration_seconds" - workDurationKey = "work_duration_seconds" - unfinishedWorkKey = "unfinished_work_seconds" - longestRunningProcessorKey = "longest_running_processor_seconds" - retriesKey = "retries_total" ) // this file contains setup logic to initialize the myriad of places @@ -44,16 +28,7 @@ const ( // from Kubernetes so that we match the core controllers. var ( - // client metrics - - requestLatency = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_request_latency_seconds", - Help: "Request latency in seconds. 
Broken down by verb and URL.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), - }, - []string{"verb", "url"}, - ) + // client metrics. requestResult = prometheus.NewCounterVec( prometheus.CounterOpts{ @@ -62,157 +37,24 @@ var ( }, []string{"code", "method", "host"}, ) - - // reflector metrics - - // TODO(directxman12): update these to be histograms once the metrics overhaul KEP - // PRs start landing. - - reflectorSubsystem = "reflector" - - listsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Subsystem: reflectorSubsystem, - Name: "lists_total", - Help: "Total number of API lists done by the reflectors", - }, []string{"name"}) - - listsDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{ - Subsystem: reflectorSubsystem, - Name: "list_duration_seconds", - Help: "How long an API list takes to return and decode for the reflectors", - }, []string{"name"}) - - itemsPerList = prometheus.NewSummaryVec(prometheus.SummaryOpts{ - Subsystem: reflectorSubsystem, - Name: "items_per_list", - Help: "How many items an API list returns to the reflectors", - }, []string{"name"}) - - watchesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Subsystem: reflectorSubsystem, - Name: "watches_total", - Help: "Total number of API watches done by the reflectors", - }, []string{"name"}) - - shortWatchesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Subsystem: reflectorSubsystem, - Name: "short_watches_total", - Help: "Total number of short API watches done by the reflectors", - }, []string{"name"}) - - watchDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{ - Subsystem: reflectorSubsystem, - Name: "watch_duration_seconds", - Help: "How long an API watch takes to return and decode for the reflectors", - }, []string{"name"}) - - itemsPerWatch = prometheus.NewSummaryVec(prometheus.SummaryOpts{ - Subsystem: reflectorSubsystem, - Name: "items_per_watch", - Help: "How many items an API watch returns to the reflectors", - }, []string{"name"}) 
- - lastResourceVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Subsystem: reflectorSubsystem, - Name: "last_resource_version", - Help: "Last resource version seen for the reflectors", - }, []string{"name"}) - - // workqueue metrics - - depth = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Subsystem: workQueueSubsystem, - Name: depthKey, - Help: "Current depth of workqueue", - }, []string{"name"}) - - adds = prometheus.NewCounterVec(prometheus.CounterOpts{ - Subsystem: workQueueSubsystem, - Name: addsKey, - Help: "Total number of adds handled by workqueue", - }, []string{"name"}) - - latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: workQueueSubsystem, - Name: queueLatencyKey, - Help: "How long in seconds an item stays in workqueue before being requested.", - Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), - }, []string{"name"}) - - workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: workQueueSubsystem, - Name: workDurationKey, - Help: "How long in seconds processing an item from workqueue takes.", - Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), - }, []string{"name"}) - - unfinishedWork = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Subsystem: workQueueSubsystem, - Name: unfinishedWorkKey, - Help: "How many seconds of work has done that " + - "is in progress and hasn't been observed by work_duration. Large " + - "values indicate stuck threads. 
One can deduce the number of stuck " + - "threads by observing the rate at which this increases.", - }, []string{"name"}) - - longestRunning = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Subsystem: workQueueSubsystem, - Name: longestRunningProcessorKey, - Help: "How many seconds has the longest running " + - "processor for workqueue been running.", - }, []string{"name"}) - - retries = prometheus.NewCounterVec(prometheus.CounterOpts{ - Subsystem: workQueueSubsystem, - Name: retriesKey, - Help: "Total number of retries handled by workqueue", - }, []string{"name"}) ) func init() { registerClientMetrics() - registerReflectorMetrics() - registerWorkqueueMetrics() } -// registerClientMetrics sets up the client latency metrics from client-go +// registerClientMetrics sets up the client latency metrics from client-go. func registerClientMetrics() { // register the metrics with our registry - Registry.MustRegister(requestLatency) Registry.MustRegister(requestResult) // register the metrics with client-go - clientmetrics.Register(&latencyAdapter{metric: requestLatency}, &resultAdapter{metric: requestResult}) -} - -// registerReflectorMetrics sets up reflector (reconile) loop metrics -func registerReflectorMetrics() { - Registry.MustRegister(listsTotal) - Registry.MustRegister(listsDuration) - Registry.MustRegister(itemsPerList) - Registry.MustRegister(watchesTotal) - Registry.MustRegister(shortWatchesTotal) - Registry.MustRegister(watchDuration) - Registry.MustRegister(itemsPerWatch) - Registry.MustRegister(lastResourceVersion) - - reflectormetrics.SetReflectorMetricsProvider(reflectorMetricsProvider{}) -} - -// registerWorkQueueMetrics sets up workqueue (other reconcile) metrics -func registerWorkqueueMetrics() { - Registry.MustRegister(depth) - Registry.MustRegister(adds) - Registry.MustRegister(latency) - Registry.MustRegister(workDuration) - Registry.MustRegister(retries) - Registry.MustRegister(longestRunning) - Registry.MustRegister(unfinishedWork) - - 
workqueuemetrics.SetProvider(workqueueMetricsProvider{}) + clientmetrics.Register(clientmetrics.RegisterOpts{ + RequestResult: &resultAdapter{metric: requestResult}, + }) } -// this section contains adapters, implementations, and other sundry organic, artisinally +// this section contains adapters, implementations, and other sundry organic, artisanally // hand-crafted syntax trees required to convince client-go that it actually wants to let // someone use its metrics. @@ -220,167 +62,10 @@ func registerWorkqueueMetrics() { // copied (more-or-less directly) from k8s.io/kubernetes setup code // (which isn't anywhere in an easily-importable place). -type latencyAdapter struct { - metric *prometheus.HistogramVec -} - -func (l *latencyAdapter) Observe(verb string, u url.URL, latency time.Duration) { - l.metric.WithLabelValues(verb, u.String()).Observe(latency.Seconds()) -} - type resultAdapter struct { metric *prometheus.CounterVec } -func (r *resultAdapter) Increment(code, method, host string) { +func (r *resultAdapter) Increment(_ context.Context, code, method, host string) { r.metric.WithLabelValues(code, method, host).Inc() } - -// Reflector metrics provider (method #2 for client-go metrics), -// copied (more-or-less directly) from k8s.io/kubernetes setup code -// (which isn't anywhere in an easily-importable place). 
- -type reflectorMetricsProvider struct{} - -func (reflectorMetricsProvider) NewListsMetric(name string) reflectormetrics.CounterMetric { - return listsTotal.WithLabelValues(name) -} - -func (reflectorMetricsProvider) NewListDurationMetric(name string) reflectormetrics.SummaryMetric { - return listsDuration.WithLabelValues(name) -} - -func (reflectorMetricsProvider) NewItemsInListMetric(name string) reflectormetrics.SummaryMetric { - return itemsPerList.WithLabelValues(name) -} - -func (reflectorMetricsProvider) NewWatchesMetric(name string) reflectormetrics.CounterMetric { - return watchesTotal.WithLabelValues(name) -} - -func (reflectorMetricsProvider) NewShortWatchesMetric(name string) reflectormetrics.CounterMetric { - return shortWatchesTotal.WithLabelValues(name) -} - -func (reflectorMetricsProvider) NewWatchDurationMetric(name string) reflectormetrics.SummaryMetric { - return watchDuration.WithLabelValues(name) -} - -func (reflectorMetricsProvider) NewItemsInWatchMetric(name string) reflectormetrics.SummaryMetric { - return itemsPerWatch.WithLabelValues(name) -} - -func (reflectorMetricsProvider) NewLastResourceVersionMetric(name string) reflectormetrics.GaugeMetric { - return lastResourceVersion.WithLabelValues(name) -} - -// Workqueue metrics (method #3 for client-go metrics), -// copied (more-or-less directly) from k8s.io/kubernetes setup code -// (which isn't anywhere in an easily-importable place). 
-// TODO(directxman12): stop "cheating" and calling histograms summaries when we pull in the latest deps - -type workqueueMetricsProvider struct{} - -func (workqueueMetricsProvider) NewDepthMetric(name string) workqueuemetrics.GaugeMetric { - return depth.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewAddsMetric(name string) workqueuemetrics.CounterMetric { - return adds.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewLatencyMetric(name string) workqueuemetrics.HistogramMetric { - return latency.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewWorkDurationMetric(name string) workqueuemetrics.HistogramMetric { - return workDuration.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueuemetrics.SettableGaugeMetric { - return unfinishedWork.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueuemetrics.SettableGaugeMetric { - return longestRunning.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewRetriesMetric(name string) workqueuemetrics.CounterMetric { - return retries.WithLabelValues(name) -} - -func (workqueueMetricsProvider) NewDeprecatedDepthMetric(name string) workqueuemetrics.GaugeMetric { - depth := prometheus.NewGauge(prometheus.GaugeOpts{ - Subsystem: name, - Name: "depth", - Help: "Current depth of workqueue: " + name, - }) - runtime.HandleError(Registry.Register(depth)) - return depth -} - -func (workqueueMetricsProvider) NewDeprecatedAddsMetric(name string) workqueuemetrics.CounterMetric { - adds := prometheus.NewCounter(prometheus.CounterOpts{ - Subsystem: name, - Name: "adds", - Help: "Total number of adds handled by workqueue: " + name, - }) - runtime.HandleError(Registry.Register(adds)) - return adds -} - -func (workqueueMetricsProvider) NewDeprecatedLatencyMetric(name string) workqueuemetrics.SummaryMetric { - latency := prometheus.NewSummary(prometheus.SummaryOpts{ - 
Subsystem: name, - Name: "queue_latency", - Help: "How long an item stays in workqueue" + name + " before being requested.", - ConstLabels: prometheus.Labels{"name": name}, - }) - runtime.HandleError(Registry.Register(latency)) - return latency -} - -func (workqueueMetricsProvider) NewDeprecatedWorkDurationMetric(name string) workqueuemetrics.SummaryMetric { - workDuration := prometheus.NewSummary(prometheus.SummaryOpts{ - Subsystem: name, - Name: "work_duration", - Help: "How long processing an item from workqueue" + name + " takes.", - ConstLabels: prometheus.Labels{"name": name}, - }) - runtime.HandleError(Registry.Register(workDuration)) - return workDuration -} - -func (workqueueMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(name string) workqueuemetrics.SettableGaugeMetric { - unfinishedWork := prometheus.NewGauge(prometheus.GaugeOpts{ - Subsystem: name, - Name: "unfinished_work_seconds", - Help: "How many seconds of work " + name + " has done that " + - "is in progress and hasn't been observed by work_duration. Large " + - "values indicate stuck threads. 
One can deduce the number of stuck " + - "threads by observing the rate at which this increases.", - }) - runtime.HandleError(Registry.Register(unfinishedWork)) - return unfinishedWork -} - -func (workqueueMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) workqueuemetrics.SettableGaugeMetric { - longestRunning := prometheus.NewGauge(prometheus.GaugeOpts{ - Subsystem: name, - Name: "longest_running_processor_microseconds", - Help: "How many microseconds has the longest running " + - "processor for " + name + " been running.", - }) - runtime.HandleError(Registry.Register(longestRunning)) - return longestRunning -} - -func (workqueueMetricsProvider) NewDeprecatedRetriesMetric(name string) workqueuemetrics.CounterMetric { - retries := prometheus.NewCounter(prometheus.CounterOpts{ - Subsystem: name, - Name: "retries", - Help: "Total number of retries handled by workqueue: " + name, - }) - runtime.HandleError(Registry.Register(retries)) - return retries -} diff --git a/pkg/metrics/filters/filter_suite_test.go b/pkg/metrics/filters/filter_suite_test.go new file mode 100644 index 0000000000..bdd21be491 --- /dev/null +++ b/pkg/metrics/filters/filter_suite_test.go @@ -0,0 +1,80 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "fmt" + "net/http" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" +) + +func TestSource(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Filters Suite") +} + +var testenv *envtest.Environment +var cfg *rest.Config +var clientset *kubernetes.Clientset + +// clientTransport is used to force-close keep-alives in tests that check for leaks. +var clientTransport *http.Transport + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + testenv = &envtest.Environment{} + + var err error + cfg, err = testenv.Start() + Expect(err).NotTo(HaveOccurred()) + + cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + // NB(directxman12): we can't set Transport *and* use TLS options, + // so we grab the transport right after it gets created so that we can + // type-assert on it (hopefully)? 
+ // hopefully this doesn't break 🤞 + transport, isTransport := rt.(*http.Transport) + if !isTransport { + panic(fmt.Sprintf("wasn't able to grab underlying transport from REST client's RoundTripper, can't figure out how to close keep-alives: expected an *http.Transport, got %#v", rt)) + } + clientTransport = transport + return rt + } + + clientset, err = kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) + + // Prevent the metrics listener being created + metricsserver.DefaultBindAddress = "0" +}) + +var _ = AfterSuite(func() { + Expect(testenv.Stop()).To(Succeed()) + + // Put the DefaultBindAddress back + metricsserver.DefaultBindAddress = ":8080" +}) diff --git a/pkg/metrics/filters/filters.go b/pkg/metrics/filters/filters.go new file mode 100644 index 0000000000..1659502bcf --- /dev/null +++ b/pkg/metrics/filters/filters.go @@ -0,0 +1,122 @@ +package filters + +import ( + "fmt" + "net/http" + "strings" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/apis/apiserver" + "k8s.io/apiserver/pkg/authentication/authenticatorfactory" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/authorization/authorizerfactory" + authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" + "k8s.io/client-go/rest" + + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" +) + +// WithAuthenticationAndAuthorization provides a metrics.Filter for authentication and authorization. +// Metrics will be authenticated (via TokenReviews) and authorized (via SubjectAccessReviews) with the +// kube-apiserver. 
+// For the authentication and authorization the controller needs a ClusterRole +// with the following rules: +// * apiGroups: authentication.k8s.io, resources: tokenreviews, verbs: create +// * apiGroups: authorization.k8s.io, resources: subjectaccessreviews, verbs: create +// +// To scrape metrics e.g. via Prometheus the client needs a ClusterRole +// with the following rule: +// * nonResourceURLs: "/metrics", verbs: get +// +// Note: Please note that configuring this metrics provider will introduce a dependency to "k8s.io/apiserver" +// to your go module. +func WithAuthenticationAndAuthorization(config *rest.Config, httpClient *http.Client) (metricsserver.Filter, error) { + authenticationV1Client, err := authenticationv1.NewForConfigAndClient(config, httpClient) + if err != nil { + return nil, err + } + authorizationV1Client, err := authorizationv1.NewForConfigAndClient(config, httpClient) + if err != nil { + return nil, err + } + + authenticatorConfig := authenticatorfactory.DelegatingAuthenticatorConfig{ + Anonymous: &apiserver.AnonymousAuthConfig{Enabled: false}, // Require authentication. + CacheTTL: 1 * time.Minute, + TokenAccessReviewClient: authenticationV1Client, + TokenAccessReviewTimeout: 10 * time.Second, + // wait.Backoff is copied from: https://github.com/kubernetes/apiserver/blob/v0.29.0/pkg/server/options/authentication.go#L43-L50 + // options.DefaultAuthWebhookRetryBackoff is not used to avoid a dependency on "k8s.io/apiserver/pkg/server/options". 
+ WebhookRetryBackoff: &wait.Backoff{ + Duration: 500 * time.Millisecond, + Factor: 1.5, + Jitter: 0.2, + Steps: 5, + }, + } + delegatingAuthenticator, _, err := authenticatorConfig.New() + if err != nil { + return nil, fmt.Errorf("failed to create authenticator: %w", err) + } + + authorizerConfig := authorizerfactory.DelegatingAuthorizerConfig{ + SubjectAccessReviewClient: authorizationV1Client, + AllowCacheTTL: 5 * time.Minute, + DenyCacheTTL: 30 * time.Second, + // wait.Backoff is copied from: https://github.com/kubernetes/apiserver/blob/v0.29.0/pkg/server/options/authentication.go#L43-L50 + // options.DefaultAuthWebhookRetryBackoff is not used to avoid a dependency on "k8s.io/apiserver/pkg/server/options". + WebhookRetryBackoff: &wait.Backoff{ + Duration: 500 * time.Millisecond, + Factor: 1.5, + Jitter: 0.2, + Steps: 5, + }, + } + delegatingAuthorizer, err := authorizerConfig.New() + if err != nil { + return nil, fmt.Errorf("failed to create authorizer: %w", err) + } + + return func(log logr.Logger, handler http.Handler) (http.Handler, error) { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + + res, ok, err := delegatingAuthenticator.AuthenticateRequest(req) + if err != nil { + log.Error(err, "Authentication failed") + http.Error(w, "Authentication failed", http.StatusInternalServerError) + return + } + if !ok { + log.V(4).Info("Authentication failed") + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return + } + + attributes := authorizer.AttributesRecord{ + User: res.User, + Verb: strings.ToLower(req.Method), + Path: req.URL.Path, + } + + authorized, reason, err := delegatingAuthorizer.Authorize(ctx, attributes) + if err != nil { + msg := fmt.Sprintf("Authorization for user %s failed", attributes.User.GetName()) + log.Error(err, msg) + http.Error(w, msg, http.StatusInternalServerError) + return + } + if authorized != authorizer.DecisionAllow { + msg := fmt.Sprintf("Authorization denied 
for user %s", attributes.User.GetName()) + log.V(4).Info(fmt.Sprintf("%s: %s", msg, reason)) + http.Error(w, msg, http.StatusForbidden) + return + } + + handler.ServeHTTP(w, req) + }), nil + }, nil +} diff --git a/pkg/metrics/filters/filters_test.go b/pkg/metrics/filters/filters_test.go new file mode 100644 index 0000000000..bd107fc56d --- /dev/null +++ b/pkg/metrics/filters/filters_test.go @@ -0,0 +1,271 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package filters + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "reflect" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/prometheus/client_golang/prometheus" + authenticationv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/metrics" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" +) + +var _ = Describe("manger.Manager", func() { + Describe("Start", func() { + Context("should start serving metrics with https and authn/authz", func() { + var srv metricsserver.Server + var defaultServer metricsDefaultServer + var opts manager.Options + var httpClient *http.Client + + BeforeEach(func() { + srv = nil + newMetricsServer := func(options metricsserver.Options, config *rest.Config, httpClient *http.Client) (metricsserver.Server, error) { + var err error + srv, err = metricsserver.NewServer(options, config, httpClient) + if srv != nil { + defaultServer = srv.(metricsDefaultServer) + } + return srv, err + } + opts = manager.Options{ + Metrics: metricsserver.Options{ + BindAddress: ":0", + SecureServing: true, + FilterProvider: WithAuthenticationAndAuthorization, + }, + } + v := reflect.ValueOf(&opts).Elem() + newMetricsField := v.FieldByName("newMetricsServer") + reflect.NewAt(newMetricsField.Type(), newMetricsField.Addr().UnsafePointer()). + Elem(). 
+ Set(reflect.ValueOf(newMetricsServer)) + httpClient = &http.Client{Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }} + }) + + It("should serve metrics in its registry", func(ctx SpecContext) { + one := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "test_one", + Help: "test metric for testing", + }) + one.Inc() + err := metrics.Registry.Register(one) + Expect(err).NotTo(HaveOccurred()) + + m, err := manager.New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + // Note: Wait until metrics server has been started. A finished leader election + // doesn't guarantee that the metrics server is up. + Eventually(func() string { return defaultServer.GetBindAddr() }, 10*time.Second).ShouldNot(BeEmpty()) + + // Setup service account with rights to "/metrics" + token, cleanup, err := setupServiceAccountForURL(ctx, m.GetClient(), "/metrics") + defer cleanup() + Expect(err).ToNot(HaveOccurred()) + + // GET /metrics with token. + metricsEndpoint := fmt.Sprintf("https://%s/metrics", defaultServer.GetBindAddr()) + req, err := http.NewRequest("GET", metricsEndpoint, nil) + Expect(err).NotTo(HaveOccurred()) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token)) + resp, err := httpClient.Do(req) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + // This is expected as the token has rights for /metrics. 
+ Expect(resp.StatusCode).To(Equal(200)) + + data, err := io.ReadAll(resp.Body) + Expect(err).NotTo(HaveOccurred()) + Expect(string(data)).To(ContainSubstring("%s\n%s\n%s\n", + `# HELP test_one test metric for testing`, + `# TYPE test_one counter`, + `test_one 1`, + )) + + // Unregister will return false if the metric was never registered + ok := metrics.Registry.Unregister(one) + Expect(ok).To(BeTrue()) + }) + + It("should serve extra endpoints", func(ctx SpecContext) { + opts.Metrics.ExtraHandlers = map[string]http.Handler{ + "/debug": http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + _, _ = w.Write([]byte("Some debug info")) + }), + } + m, err := manager.New(cfg, opts) + Expect(err).NotTo(HaveOccurred()) + + go func() { + defer GinkgoRecover() + Expect(m.Start(ctx)).NotTo(HaveOccurred()) + }() + <-m.Elected() + // Note: Wait until metrics server has been started. A finished leader election + // doesn't guarantee that the metrics server is up. + Eventually(func() string { return defaultServer.GetBindAddr() }, 10*time.Second).ShouldNot(BeEmpty()) + + // Setup service account with rights to "/debug" + token, cleanup, err := setupServiceAccountForURL(ctx, m.GetClient(), "/debug") + defer cleanup() + Expect(err).ToNot(HaveOccurred()) + + // GET /debug without token. + endpoint := fmt.Sprintf("https://%s/debug", defaultServer.GetBindAddr()) + req, err := http.NewRequest("GET", endpoint, nil) + Expect(err).NotTo(HaveOccurred()) + resp, err := httpClient.Do(req) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + // This is expected as we didn't send a token. + Expect(resp.StatusCode).To(Equal(401)) + body, err := io.ReadAll(resp.Body) + Expect(err).NotTo(HaveOccurred()) + Expect(string(body)).To(ContainSubstring("Unauthorized")) + + // PUT /debug with token. 
+ req, err = http.NewRequest("PUT", endpoint, nil) + Expect(err).NotTo(HaveOccurred()) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token)) + resp, err = httpClient.Do(req) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + // This is expected as the token has rights for /debug. + Expect(resp.StatusCode).To(Equal(200)) + body, err = io.ReadAll(resp.Body) + Expect(err).NotTo(HaveOccurred()) + Expect(string(body)).To(Equal("Some debug info")) + + // GET /metrics with token (but token only has rights for /debug). + metricsEndpoint := fmt.Sprintf("https://%s/metrics", defaultServer.GetBindAddr()) + req, err = http.NewRequest("GET", metricsEndpoint, nil) + Expect(err).NotTo(HaveOccurred()) + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token)) + resp, err = httpClient.Do(req) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + Expect(resp.StatusCode).To(Equal(403)) + body, err = io.ReadAll(resp.Body) + Expect(err).NotTo(HaveOccurred()) + // Authorization denied is expected as the token only has rights for /debug not for /metrics. 
+ Expect(string(body)).To(ContainSubstring("Authorization denied for user system:serviceaccount:default:metrics-test")) + }) + }) + }) +}) + +type metricsDefaultServer interface { + GetBindAddr() string +} + +func setupServiceAccountForURL(ctx context.Context, c client.Client, path string) (string, func(), error) { + createdObjects := []client.Object{} + cleanup := func() { + for _, obj := range createdObjects { + _ = c.Delete(ctx, obj) + } + } + + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metrics-test", + Namespace: metav1.NamespaceDefault, + }, + } + if err := c.Create(ctx, sa); err != nil { + return "", cleanup, err + } + createdObjects = append(createdObjects, sa) + + cr := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metrics-test", + }, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"get", "put"}, + NonResourceURLs: []string{path}, + }, + }, + } + if err := c.Create(ctx, cr); err != nil { + return "", cleanup, err + } + createdObjects = append(createdObjects, cr) + + crb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metrics-test", + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: "metrics-test", + Namespace: metav1.NamespaceDefault, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: "metrics-test", + }, + } + if err := c.Create(ctx, crb); err != nil { + return "", cleanup, err + } + createdObjects = append(createdObjects, crb) + + tokenRequest := &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ + ExpirationSeconds: ptr.To(int64(2 * 60 * 60)), // 2 hours. 
+ }, + } + if err := c.SubResource("token").Create(ctx, sa, tokenRequest); err != nil { + return "", cleanup, err + } + + if tokenRequest.Status.Token == "" { + return "", cleanup, errors.New("failed to get ServiceAccount token: token should not be empty") + } + + return tokenRequest.Status.Token, cleanup, nil +} diff --git a/pkg/metrics/leaderelection.go b/pkg/metrics/leaderelection.go new file mode 100644 index 0000000000..61e1009d32 --- /dev/null +++ b/pkg/metrics/leaderelection.go @@ -0,0 +1,47 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "k8s.io/client-go/tools/leaderelection" +) + +// This file is copied and adapted from k8s.io/component-base/metrics/prometheus/clientgo/leaderelection +// which registers metrics to the k8s legacy Registry. We require very +// similar functionality, but must register metrics to a different Registry. + +var ( + leaderGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "leader_election_master_status", + Help: "Gauge of if the reporting system is master of the relevant lease, 0 indicates backup, 1 indicates master. 'name' is the string used to identify the lease. Please make sure to group by name.", + }, []string{"name"}) + + leaderSlowpathCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "leader_election_slowpath_total", + Help: "Total number of slow path exercised in renewing leader leases. 'name' is the string used to identify the lease. 
Please make sure to group by name.", + }, []string{"name"}) +) + +func init() { + Registry.MustRegister(leaderGauge) + leaderelection.SetProvider(leaderelectionMetricsProvider{}) +} + +type leaderelectionMetricsProvider struct{} + +func (leaderelectionMetricsProvider) NewLeaderMetric() leaderelection.LeaderMetric { + return leaderElectionPrometheusAdapter{} +} + +type leaderElectionPrometheusAdapter struct{} + +func (s leaderElectionPrometheusAdapter) On(name string) { + leaderGauge.WithLabelValues(name).Set(1.0) +} + +func (s leaderElectionPrometheusAdapter) Off(name string) { + leaderGauge.WithLabelValues(name).Set(0.0) +} + +func (leaderElectionPrometheusAdapter) SlowpathExercised(name string) { + leaderSlowpathCounter.WithLabelValues(name).Inc() +} diff --git a/pkg/metrics/listener.go b/pkg/metrics/listener.go deleted file mode 100644 index e9db3a110c..0000000000 --- a/pkg/metrics/listener.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metrics - -import ( - "fmt" - "net" -) - -// DefaultBindAddress sets the default bind address for the metrics -// listener -// The metrics is off by default. -// TODO: Flip the default by changing DefaultBindAddress back to ":8080" in the v0.2.0. -var DefaultBindAddress = "0" - -// NewListener creates a new TCP listener bound to the given address. 
-func NewListener(addr string) (net.Listener, error) { - if addr == "" { - // If the metrics bind address is empty, default to ":8080" - addr = DefaultBindAddress - } - - // Add a case to disable metrics altogether - if addr == "0" { - return nil, nil - } - - ln, err := net.Listen("tcp", addr) - if err != nil { - return nil, fmt.Errorf("error listening on %s: %v", addr, err) - } - return ln, nil -} diff --git a/pkg/metrics/registry.go b/pkg/metrics/registry.go index 88d88c324c..ce17124d53 100644 --- a/pkg/metrics/registry.go +++ b/pkg/metrics/registry.go @@ -18,6 +18,13 @@ package metrics import "github.com/prometheus/client_golang/prometheus" +// RegistererGatherer combines both parts of the API of a Prometheus +// registry, both the Registerer and the Gatherer interfaces. +type RegistererGatherer interface { + prometheus.Registerer + prometheus.Gatherer +} + // Registry is a prometheus registry for storing metrics within the -// controller-runtime -var Registry = prometheus.NewRegistry() +// controller-runtime. +var Registry RegistererGatherer = prometheus.NewRegistry() diff --git a/pkg/patterns/operator/doc.go b/pkg/metrics/server/doc.go similarity index 68% rename from pkg/patterns/operator/doc.go rename to pkg/metrics/server/doc.go index 5ccd0791af..4c42f6eed7 100644 --- a/pkg/patterns/operator/doc.go +++ b/pkg/metrics/server/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2023 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,9 +15,12 @@ limitations under the License. */ /* -Package operator serves to redirect users to the application package. - -Operators are the common name for Kubernetes APIs which manage specific applications. e.g. Spark Operator, -Etcd Operator. +Package server provides the metrics server implementation. 
*/ -package operator +package server + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("metrics") diff --git a/pkg/metrics/server/server.go b/pkg/metrics/server/server.go new file mode 100644 index 0000000000..939c333f7a --- /dev/null +++ b/pkg/metrics/server/server.go @@ -0,0 +1,340 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package server + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "sync" + "time" + + "github.com/go-logr/logr" + "github.com/prometheus/client_golang/prometheus/promhttp" + "k8s.io/client-go/rest" + certutil "k8s.io/client-go/util/cert" + + "sigs.k8s.io/controller-runtime/pkg/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +const ( + defaultMetricsEndpoint = "/metrics" +) + +// DefaultBindAddress is the default bind address for the metrics server. +var DefaultBindAddress = ":8080" + +// Server is a server that serves metrics. +type Server interface { + // AddExtraHandler adds extra handler served on path to the http server that serves metrics. + AddExtraHandler(path string, handler http.Handler) error + + // NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates + // the metrics server doesn't need leader election. + NeedLeaderElection() bool + + // Start runs the server. 
+ // It will install the metrics related resources depending on the server configuration. + Start(ctx context.Context) error +} + +// Options are all available options for the metrics.Server +type Options struct { + // SecureServing enables serving metrics via https. + // Per default metrics will be served via http. + SecureServing bool + + // BindAddress is the bind address for the metrics server. + // It will be defaulted to ":8080" if unspecified. + // Set this to "0" to disable the metrics server. + BindAddress string + + // ExtraHandlers contains a map of handlers (by path) which will be added to the metrics server. + // This might be useful to register diagnostic endpoints e.g. pprof. + // Note that pprof endpoints are meant to be sensitive and shouldn't be exposed publicly. + // If the simple path -> handler mapping offered here is not enough, a new http + // server/listener should be added as Runnable to the manager via the Add method. + ExtraHandlers map[string]http.Handler + + // FilterProvider provides a filter which is a func that is added around + // the metrics and the extra handlers on the metrics server. + // This can be e.g. used to enforce authentication and authorization on the handlers + // endpoint by setting this field to filters.WithAuthenticationAndAuthorization. + FilterProvider func(c *rest.Config, httpClient *http.Client) (Filter, error) + + // CertDir is the directory that contains the server key and certificate. Defaults to + // /k8s-metrics-server/serving-certs. + // + // Note: This option is only used when TLSOpts does not set GetCertificate. + // Note: If certificate or key doesn't exist a self-signed certificate will be used. + CertDir string + + // CertName is the server certificate name. Defaults to tls.crt. + // + // Note: This option is only used when TLSOpts does not set GetCertificate. + // Note: If certificate or key doesn't exist a self-signed certificate will be used. + CertName string + + // KeyName is the server key name. 
Defaults to tls.key. + // + // Note: This option is only used when TLSOpts does not set GetCertificate. + // Note: If certificate or key doesn't exist a self-signed certificate will be used. + KeyName string + + // TLSOpts is used to allow configuring the TLS config used for the server. + // This also allows providing a certificate via GetCertificate. + TLSOpts []func(*tls.Config) + + // ListenConfig contains options for listening to an address on the metric server. + ListenConfig net.ListenConfig +} + +// Filter is a func that is added around metrics and extra handlers on the metrics server. +type Filter func(log logr.Logger, handler http.Handler) (http.Handler, error) + +// NewServer constructs a new metrics.Server from the provided options. +func NewServer(o Options, config *rest.Config, httpClient *http.Client) (Server, error) { + o.setDefaults() + + // Skip server creation if metrics are disabled. + if o.BindAddress == "0" { + return nil, nil + } + + // Validate that ExtraHandlers is not overwriting the default /metrics endpoint. + if o.ExtraHandlers != nil { + if _, ok := o.ExtraHandlers[defaultMetricsEndpoint]; ok { + return nil, fmt.Errorf("overriding builtin %s endpoint is not allowed", defaultMetricsEndpoint) + } + } + + // Create the metrics filter if a FilterProvider is set. + var metricsFilter Filter + if o.FilterProvider != nil { + var err error + metricsFilter, err = o.FilterProvider(config, httpClient) + if err != nil { + return nil, fmt.Errorf("filter provider failed to create filter for the metrics server: %w", err) + } + } + + return &defaultServer{ + metricsFilter: metricsFilter, + options: o, + }, nil +} + +// defaultServer is the default implementation used for Server. +type defaultServer struct { + options Options + + // metricsFilter is a filter which is added around + // the metrics and the extra handlers on the metrics server. + metricsFilter Filter + + // mu protects access to the bindAddr field. 
+ mu sync.RWMutex
+
+ // bindAddr is used to store the bindAddr after the listener has been created.
+ // This is used during testing to figure out the port that has been chosen randomly.
+ bindAddr string
+}
+
+// setDefaults does defaulting for the Server.
+func (o *Options) setDefaults() {
+ if o.BindAddress == "" {
+ o.BindAddress = DefaultBindAddress
+ }
+
+ if len(o.CertDir) == 0 {
+ o.CertDir = filepath.Join(os.TempDir(), "k8s-metrics-server", "serving-certs")
+ }
+
+ if len(o.CertName) == 0 {
+ o.CertName = "tls.crt"
+ }
+
+ if len(o.KeyName) == 0 {
+ o.KeyName = "tls.key"
+ }
+}
+
+// NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates
+// the metrics server doesn't need leader election.
+func (*defaultServer) NeedLeaderElection() bool {
+ return false
+}
+
+// AddExtraHandler adds extra handler served on path to the http server that serves metrics.
+func (s *defaultServer) AddExtraHandler(path string, handler http.Handler) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.options.ExtraHandlers == nil {
+ s.options.ExtraHandlers = make(map[string]http.Handler)
+ }
+ if path == defaultMetricsEndpoint {
+ return fmt.Errorf("overriding builtin %s endpoint is not allowed", defaultMetricsEndpoint)
+ }
+ if _, found := s.options.ExtraHandlers[path]; found {
+ return fmt.Errorf("can't register extra handler by duplicate path %q on metrics http server", path)
+ }
+ s.options.ExtraHandlers[path] = handler
+ return nil
+}
+
+// Start runs the server.
+// It will install the metrics related resources depending on the server configuration.
+func (s *defaultServer) Start(ctx context.Context) error {
+ log.Info("Starting metrics server")
+
+ listener, err := s.createListener(ctx, log)
+ if err != nil {
+ return fmt.Errorf("failed to start metrics server: failed to create listener: %w", err)
+ }
+ // Storing bindAddr here so we can retrieve it during testing via GetBindAddr.
+ s.mu.Lock() + s.bindAddr = listener.Addr().String() + s.mu.Unlock() + + mux := http.NewServeMux() + + handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{ + ErrorHandling: promhttp.HTTPErrorOnError, + }) + if s.metricsFilter != nil { + log := log.WithValues("path", defaultMetricsEndpoint) + var err error + handler, err = s.metricsFilter(log, handler) + if err != nil { + return fmt.Errorf("failed to start metrics server: failed to add metrics filter: %w", err) + } + } + // TODO(JoelSpeed): Use existing Kubernetes machinery for serving metrics + mux.Handle(defaultMetricsEndpoint, handler) + + for path, extraHandler := range s.options.ExtraHandlers { + if s.metricsFilter != nil { + log := log.WithValues("path", path) + var err error + extraHandler, err = s.metricsFilter(log, extraHandler) + if err != nil { + return fmt.Errorf("failed to start metrics server: failed to add metrics filter to extra handler for path %s: %w", path, err) + } + } + mux.Handle(path, extraHandler) + } + + log.Info("Serving metrics server", "bindAddress", s.options.BindAddress, "secure", s.options.SecureServing) + + srv := httpserver.New(mux) + + idleConnsClosed := make(chan struct{}) + go func() { + <-ctx.Done() + log.Info("Shutting down metrics server with timeout of 1 minute") + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + if err := srv.Shutdown(ctx); err != nil { + // Error from closing listeners, or context timeout + log.Error(err, "error shutting down the HTTP server") + } + close(idleConnsClosed) + }() + + if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed { + return err + } + + <-idleConnsClosed + return nil +} + +func (s *defaultServer) createListener(ctx context.Context, log logr.Logger) (net.Listener, error) { + if !s.options.SecureServing { + return s.options.ListenConfig.Listen(ctx, "tcp", s.options.BindAddress) + } + + cfg := &tls.Config{ + NextProtos: []string{"h2"}, + } + // fallback TLS 
config ready, will now mutate if passer wants full control over it + for _, op := range s.options.TLSOpts { + op(cfg) + } + + if cfg.GetCertificate == nil { + certPath := filepath.Join(s.options.CertDir, s.options.CertName) + keyPath := filepath.Join(s.options.CertDir, s.options.KeyName) + + _, certErr := os.Stat(certPath) + certExists := !os.IsNotExist(certErr) + _, keyErr := os.Stat(keyPath) + keyExists := !os.IsNotExist(keyErr) + if certExists && keyExists { + // Create the certificate watcher and + // set the config's GetCertificate on the TLSConfig + certWatcher, err := certwatcher.New(certPath, keyPath) + if err != nil { + return nil, err + } + cfg.GetCertificate = certWatcher.GetCertificate + + go func() { + if err := certWatcher.Start(ctx); err != nil { + log.Error(err, "certificate watcher error") + } + }() + } + } + + // If cfg.GetCertificate is still nil, i.e. we didn't configure a cert watcher, fallback to a self-signed certificate. + if cfg.GetCertificate == nil { + // Note: Using self-signed certificates here should be good enough. It's just important that we + // encrypt the communication. For example kube-controller-manager also uses a self-signed certificate + // for the metrics endpoint per default. 
+ cert, key, err := certutil.GenerateSelfSignedCertKeyWithFixtures("localhost", []net.IP{{127, 0, 0, 1}}, nil, "") + if err != nil { + return nil, fmt.Errorf("failed to generate self-signed certificate for metrics server: %w", err) + } + + keyPair, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, fmt.Errorf("failed to create self-signed key pair for metrics server: %w", err) + } + cfg.Certificates = []tls.Certificate{keyPair} + } + + l, err := s.options.ListenConfig.Listen(ctx, "tcp", s.options.BindAddress) + if err != nil { + return nil, err + } + + return tls.NewListener(l, cfg), nil +} + +func (s *defaultServer) GetBindAddr() string { + s.mu.RLock() + defer s.mu.RUnlock() + return s.bindAddr +} diff --git a/pkg/metrics/workqueue.go b/pkg/metrics/workqueue.go new file mode 100644 index 0000000000..cd7ccc773e --- /dev/null +++ b/pkg/metrics/workqueue.go @@ -0,0 +1,29 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +// Metrics subsystem and all keys used by the workqueue. 
+const ( + WorkQueueSubsystem = "workqueue" + DepthKey = "depth" + AddsKey = "adds_total" + QueueLatencyKey = "queue_duration_seconds" + WorkDurationKey = "work_duration_seconds" + UnfinishedWorkKey = "unfinished_work_seconds" + LongestRunningProcessorKey = "longest_running_processor_seconds" + RetriesKey = "retries_total" +) diff --git a/pkg/patterns/application/doc.go b/pkg/patterns/application/doc.go deleted file mode 100644 index ea2bdd219d..0000000000 --- a/pkg/patterns/application/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package application documents patterns for building Controllers to manage specific applications. -// -// -// An application is a Controller and Resource that together implement the operational logic for an application. -// They are often used to take off-the-shelf OSS applications, and make them Kubernetes native. -// -// A typical application Controller may use a new builder.SimpleController() to create a Controller -// for a single API type that manages other objects it creates. -// -// Application Controllers are most useful for stateful applications such as Cassandra, Etcd and MySQL -// which contain operation logic for sharding, backup and restore, upgrade / downgrade, etc. 
-package application diff --git a/pkg/predicate/example_test.go b/pkg/predicate/example_test.go index 4ce1778983..57a1ce7779 100644 --- a/pkg/predicate/example_test.go +++ b/pkg/predicate/example_test.go @@ -27,7 +27,7 @@ var p predicate.Predicate func ExampleFuncs() { p = predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { - return e.MetaOld.GetGeneration() != e.MetaNew.GetGeneration() + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() }, } } diff --git a/pkg/predicate/predicate.go b/pkg/predicate/predicate.go index 9542d4a1c0..9f24cb178c 100644 --- a/pkg/predicate/predicate.go +++ b/pkg/predicate/predicate.go @@ -17,6 +17,12 @@ limitations under the License. package predicate import ( + "maps" + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) @@ -24,95 +30,400 @@ import ( var log = logf.RuntimeLog.WithName("predicate").WithName("eventFilters") // Predicate filters events before enqueuing the keys. -type Predicate interface { +type Predicate = TypedPredicate[client.Object] + +// TypedPredicate filters events before enqueuing the keys. 
+type TypedPredicate[object any] interface { // Create returns true if the Create event should be processed - Create(event.CreateEvent) bool + Create(event.TypedCreateEvent[object]) bool // Delete returns true if the Delete event should be processed - Delete(event.DeleteEvent) bool + Delete(event.TypedDeleteEvent[object]) bool // Update returns true if the Update event should be processed - Update(event.UpdateEvent) bool + Update(event.TypedUpdateEvent[object]) bool // Generic returns true if the Generic event should be processed - Generic(event.GenericEvent) bool + Generic(event.TypedGenericEvent[object]) bool } -var _ Predicate = Funcs{} -var _ Predicate = ResourceVersionChangedPredicate{} +var ( + _ Predicate = Funcs{} + _ Predicate = ResourceVersionChangedPredicate{} + _ Predicate = GenerationChangedPredicate{} + _ Predicate = AnnotationChangedPredicate{} + _ Predicate = or[client.Object]{} + _ Predicate = and[client.Object]{} + _ Predicate = not[client.Object]{} +) // Funcs is a function that implements Predicate. -type Funcs struct { +type Funcs = TypedFuncs[client.Object] + +// TypedFuncs is a function that implements TypedPredicate. +type TypedFuncs[object any] struct { // Create returns true if the Create event should be processed - CreateFunc func(event.CreateEvent) bool + CreateFunc func(event.TypedCreateEvent[object]) bool // Delete returns true if the Delete event should be processed - DeleteFunc func(event.DeleteEvent) bool + DeleteFunc func(event.TypedDeleteEvent[object]) bool // Update returns true if the Update event should be processed - UpdateFunc func(event.UpdateEvent) bool + UpdateFunc func(event.TypedUpdateEvent[object]) bool // Generic returns true if the Generic event should be processed - GenericFunc func(event.GenericEvent) bool + GenericFunc func(event.TypedGenericEvent[object]) bool } -// Create implements Predicate -func (p Funcs) Create(e event.CreateEvent) bool { +// Create implements Predicate. 
+func (p TypedFuncs[object]) Create(e event.TypedCreateEvent[object]) bool { if p.CreateFunc != nil { return p.CreateFunc(e) } return true } -// Delete implements Predicate -func (p Funcs) Delete(e event.DeleteEvent) bool { +// Delete implements Predicate. +func (p TypedFuncs[object]) Delete(e event.TypedDeleteEvent[object]) bool { if p.DeleteFunc != nil { return p.DeleteFunc(e) } return true } -// Update implements Predicate -func (p Funcs) Update(e event.UpdateEvent) bool { +// Update implements Predicate. +func (p TypedFuncs[object]) Update(e event.TypedUpdateEvent[object]) bool { if p.UpdateFunc != nil { return p.UpdateFunc(e) } return true } -// Generic implements Predicate -func (p Funcs) Generic(e event.GenericEvent) bool { +// Generic implements Predicate. +func (p TypedFuncs[object]) Generic(e event.TypedGenericEvent[object]) bool { if p.GenericFunc != nil { return p.GenericFunc(e) } return true } -// ResourceVersionChangedPredicate implements a default update predicate function on resource version change -type ResourceVersionChangedPredicate struct { - Funcs +// NewPredicateFuncs returns a predicate funcs that applies the given filter function +// on CREATE, UPDATE, DELETE and GENERIC events. For UPDATE events, the filter is applied +// to the new object. +func NewPredicateFuncs(filter func(object client.Object) bool) Funcs { + return Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return filter(e.Object) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return filter(e.ObjectNew) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return filter(e.Object) + }, + GenericFunc: func(e event.GenericEvent) bool { + return filter(e.Object) + }, + } +} + +// NewTypedPredicateFuncs returns a predicate funcs that applies the given filter function +// on CREATE, UPDATE, DELETE and GENERIC events. For UPDATE events, the filter is applied +// to the new object. 
+func NewTypedPredicateFuncs[object any](filter func(object object) bool) TypedFuncs[object] { + return TypedFuncs[object]{ + CreateFunc: func(e event.TypedCreateEvent[object]) bool { + return filter(e.Object) + }, + UpdateFunc: func(e event.TypedUpdateEvent[object]) bool { + return filter(e.ObjectNew) + }, + DeleteFunc: func(e event.TypedDeleteEvent[object]) bool { + return filter(e.Object) + }, + GenericFunc: func(e event.TypedGenericEvent[object]) bool { + return filter(e.Object) + }, + } +} + +// ResourceVersionChangedPredicate implements a default update predicate function on resource version change. +type ResourceVersionChangedPredicate = TypedResourceVersionChangedPredicate[client.Object] + +// TypedResourceVersionChangedPredicate implements a default update predicate function on resource version change. +type TypedResourceVersionChangedPredicate[T metav1.Object] struct { + TypedFuncs[T] } -// Update implements default UpdateEvent filter for validating resource version change -func (ResourceVersionChangedPredicate) Update(e event.UpdateEvent) bool { - if e.MetaOld == nil { - log.Error(nil, "UpdateEvent has no old metadata", "event", e) +// Update implements default UpdateEvent filter for validating resource version change. +func (TypedResourceVersionChangedPredicate[T]) Update(e event.TypedUpdateEvent[T]) bool { + if isNil(e.ObjectOld) { + log.Error(nil, "Update event has no old object to update", "event", e) return false } - if e.ObjectOld == nil { - log.Error(nil, "GenericEvent has no old runtime object to update", "event", e) + if isNil(e.ObjectNew) { + log.Error(nil, "Update event has no new object to update", "event", e) return false } - if e.ObjectNew == nil { - log.Error(nil, "GenericEvent has no new runtime object for update", "event", e) + + return e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() +} + +// GenerationChangedPredicate implements a default update predicate function on Generation change. 
+// +// This predicate will skip update events that have no change in the object's metadata.generation field. +// The metadata.generation field of an object is incremented by the API server when writes are made to the spec field of an object. +// This allows a controller to ignore update events where the spec is unchanged, and only the metadata and/or status fields are changed. +// +// For CustomResource objects the Generation is incremented when spec is changed, or status changed and status not modeled as subresource. +// subresource status update will not increase Generation. +// +// Caveats: +// +// * The assumption that the Generation is incremented only on writing to the spec does not hold for all APIs. +// E.g For Deployment objects the Generation is also incremented on writes to the metadata.annotations field. +// For object types other than CustomResources be sure to verify which fields will trigger a Generation increment when they are written to. +// +// * With this predicate, any update events with writes only to the status field will not be reconciled. +// So in the event that the status block is overwritten or wiped by someone else the controller will not self-correct to restore the correct status. +type GenerationChangedPredicate = TypedGenerationChangedPredicate[client.Object] + +// TypedGenerationChangedPredicate implements a default update predicate function on Generation change. +// +// This predicate will skip update events that have no change in the object's metadata.generation field. +// The metadata.generation field of an object is incremented by the API server when writes are made to the spec field of an object. +// This allows a controller to ignore update events where the spec is unchanged, and only the metadata and/or status fields are changed. +// +// For CustomResource objects the Generation is incremented when spec is changed, or status changed and status not modeled as subresource. 
+// subresource status update will not increase Generation. +// +// Caveats: +// +// * The assumption that the Generation is incremented only on writing to the spec does not hold for all APIs. +// E.g For Deployment objects the Generation is also incremented on writes to the metadata.annotations field. +// For object types other than CustomResources be sure to verify which fields will trigger a Generation increment when they are written to. +// +// * With this predicate, any update events with writes only to the status field will not be reconciled. +// So in the event that the status block is overwritten or wiped by someone else the controller will not self-correct to restore the correct status. +type TypedGenerationChangedPredicate[object metav1.Object] struct { + TypedFuncs[object] +} + +// Update implements default UpdateEvent filter for validating generation change. +func (TypedGenerationChangedPredicate[object]) Update(e event.TypedUpdateEvent[object]) bool { + if isNil(e.ObjectOld) { + log.Error(nil, "Update event has no old object to update", "event", e) return false } - if e.MetaNew == nil { - log.Error(nil, "UpdateEvent has no new metadata", "event", e) + if isNil(e.ObjectNew) { + log.Error(nil, "Update event has no new object for update", "event", e) return false } - if e.MetaNew.GetResourceVersion() == e.MetaOld.GetResourceVersion() { + + return e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration() +} + +// AnnotationChangedPredicate implements a default update predicate function on annotation change. +// +// This predicate will skip update events that have no change in the object's annotation. 
+// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example:
+//
+// Controller.Watch(
+// &source.Kind{Type: v1.MyCustomKind},
+// &handler.EnqueueRequestForObject{},
+// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{}))
+//
+// This is mostly useful for controllers that need to trigger both when the resource's generation is incremented
+// (i.e., when the resource's .spec changes), or an annotation changes (e.g., for a staging/alpha API).
+type AnnotationChangedPredicate = TypedAnnotationChangedPredicate[client.Object]
+
+// TypedAnnotationChangedPredicate implements a default update predicate function on annotation change.
+type TypedAnnotationChangedPredicate[object metav1.Object] struct {
+ TypedFuncs[object]
+}
+
+// Update implements default UpdateEvent filter for validating annotation change.
+func (TypedAnnotationChangedPredicate[object]) Update(e event.TypedUpdateEvent[object]) bool {
+ if isNil(e.ObjectOld) {
+ log.Error(nil, "Update event has no old object to update", "event", e)
+ return false
+ }
+ if isNil(e.ObjectNew) {
+ log.Error(nil, "Update event has no new object for update", "event", e)
+ return false
+ }
+
+ return !maps.Equal(e.ObjectNew.GetAnnotations(), e.ObjectOld.GetAnnotations())
+}
+
+// LabelChangedPredicate implements a default update predicate function on label change.
+//
+// This predicate will skip update events that have no change in the object's label.
+// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example:
+//
+// Controller.Watch(
+// &source.Kind{Type: v1.MyCustomKind},
+// &handler.EnqueueRequestForObject{},
+// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))
+//
+// This will be helpful when an object's labels are carrying some extra specification information beyond the object's spec,
+// and the controller will be triggered if any valid spec change (not only in spec, but also in labels) happens.
+type LabelChangedPredicate = TypedLabelChangedPredicate[client.Object]
+
+// TypedLabelChangedPredicate implements a default update predicate function on label change.
+type TypedLabelChangedPredicate[object metav1.Object] struct {
+ TypedFuncs[object]
+}
+
+// Update implements default UpdateEvent filter for checking label change.
+func (TypedLabelChangedPredicate[object]) Update(e event.TypedUpdateEvent[object]) bool {
+ if isNil(e.ObjectOld) {
+ log.Error(nil, "Update event has no old object to update", "event", e)
+ return false
+ }
+ if isNil(e.ObjectNew) {
+ log.Error(nil, "Update event has no new object for update", "event", e)
+ return false
+ }
+
+ return !maps.Equal(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels())
+}
+
+// And returns a composite predicate that implements a logical AND of the predicates passed to it.
+func And[object any](predicates ...TypedPredicate[object]) TypedPredicate[object] { + return and[object]{predicates} +} + +type and[object any] struct { + predicates []TypedPredicate[object] +} + +func (a and[object]) Create(e event.TypedCreateEvent[object]) bool { + for _, p := range a.predicates { + if !p.Create(e) { + return false + } + } return true } + +func (a and[object]) Update(e event.TypedUpdateEvent[object]) bool { + for _, p := range a.predicates { + if !p.Update(e) { + return false + } + } + return true +} + +func (a and[object]) Delete(e event.TypedDeleteEvent[object]) bool { + for _, p := range a.predicates { + if !p.Delete(e) { + return false + } + } + return true +} + +func (a and[object]) Generic(e event.TypedGenericEvent[object]) bool { + for _, p := range a.predicates { + if !p.Generic(e) { + return false + } + } + return true +} + +// Or returns a composite predicate that implements a logical OR of the predicates passed to it. +func Or[object any](predicates ...TypedPredicate[object]) TypedPredicate[object] { + return or[object]{predicates} +} + +type or[object any] struct { + predicates []TypedPredicate[object] +} + +func (o or[object]) Create(e event.TypedCreateEvent[object]) bool { + for _, p := range o.predicates { + if p.Create(e) { + return true + } + } + return false +} + +func (o or[object]) Update(e event.TypedUpdateEvent[object]) bool { + for _, p := range o.predicates { + if p.Update(e) { + return true + } + } + return false +} + +func (o or[object]) Delete(e event.TypedDeleteEvent[object]) bool { + for _, p := range o.predicates { + if p.Delete(e) { + return true + } + } + return false +} + +func (o or[object]) Generic(e event.TypedGenericEvent[object]) bool { + for _, p := range o.predicates { + if p.Generic(e) { + return true + } + } + return false +} + +// Not returns a predicate that implements a logical NOT of the predicate passed to it. 
+func Not[object any](predicate TypedPredicate[object]) TypedPredicate[object] { + return not[object]{predicate} +} + +type not[object any] struct { + predicate TypedPredicate[object] +} + +func (n not[object]) Create(e event.TypedCreateEvent[object]) bool { + return !n.predicate.Create(e) +} + +func (n not[object]) Update(e event.TypedUpdateEvent[object]) bool { + return !n.predicate.Update(e) +} + +func (n not[object]) Delete(e event.TypedDeleteEvent[object]) bool { + return !n.predicate.Delete(e) +} + +func (n not[object]) Generic(e event.TypedGenericEvent[object]) bool { + return !n.predicate.Generic(e) +} + +// LabelSelectorPredicate constructs a Predicate from a LabelSelector. +// Only objects matching the LabelSelector will be admitted. +func LabelSelectorPredicate(s metav1.LabelSelector) (Predicate, error) { + selector, err := metav1.LabelSelectorAsSelector(&s) + if err != nil { + return Funcs{}, err + } + return NewPredicateFuncs(func(o client.Object) bool { + return selector.Matches(labels.Set(o.GetLabels())) + }), nil +} + +func isNil(arg any) bool { + if v := reflect.ValueOf(arg); !v.IsValid() || ((v.Kind() == reflect.Ptr || + v.Kind() == reflect.Interface || + v.Kind() == reflect.Slice || + v.Kind() == reflect.Map || + v.Kind() == reflect.Chan || + v.Kind() == reflect.Func) && v.IsNil()) { + return true + } + return false +} diff --git a/pkg/predicate/predicate_suite_test.go b/pkg/predicate/predicate_suite_test.go index 15ee76ffef..170594ca52 100644 --- a/pkg/predicate/predicate_suite_test.go +++ b/pkg/predicate/predicate_suite_test.go @@ -19,18 +19,17 @@ package predicate_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) func TestPredicate(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Predicate Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Predicate Suite") } var _ = BeforeSuite(func() { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) }) diff --git a/pkg/predicate/predicate_test.go b/pkg/predicate/predicate_test.go index b66c462f4a..f322b7810b 100644 --- a/pkg/predicate/predicate_test.go +++ b/pkg/predicate/predicate_test.go @@ -17,10 +17,12 @@ limitations under the License. package predicate_test import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" ) @@ -57,23 +59,20 @@ var _ = Describe("Predicate", func() { }, } - It("should call Create", func(done Done) { + It("should call Create", func() { instance := failingFuncs instance.CreateFunc = func(evt event.CreateEvent) bool { defer GinkgoRecover() - Expect(evt.Meta).To(Equal(pod.GetObjectMeta())) Expect(evt.Object).To(Equal(pod)) return false } evt := event.CreateEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } Expect(instance.Create(evt)).To(BeFalse()) instance.CreateFunc = func(evt event.CreateEvent) bool { defer GinkgoRecover() - Expect(evt.Meta).To(Equal(pod.GetObjectMeta())) Expect(evt.Object).To(Equal(pod)) return true } @@ -81,10 +80,9 @@ var _ = Describe("Predicate", func() { instance.CreateFunc = nil Expect(instance.Create(evt)).To(BeTrue()) - close(done) }) - It("should call Update", func(done Done) { + It("should call 
Update", func() { newPod := pod.DeepCopy() newPod.Name = "baz2" newPod.Namespace = "biz2" @@ -92,25 +90,19 @@ var _ = Describe("Predicate", func() { instance := failingFuncs instance.UpdateFunc = func(evt event.UpdateEvent) bool { defer GinkgoRecover() - Expect(evt.MetaOld).To(Equal(pod.GetObjectMeta())) Expect(evt.ObjectOld).To(Equal(pod)) - Expect(evt.MetaNew).To(Equal(newPod.GetObjectMeta())) Expect(evt.ObjectNew).To(Equal(newPod)) return false } evt := event.UpdateEvent{ ObjectOld: pod, - MetaOld: pod.GetObjectMeta(), ObjectNew: newPod, - MetaNew: newPod.GetObjectMeta(), } Expect(instance.Update(evt)).To(BeFalse()) instance.UpdateFunc = func(evt event.UpdateEvent) bool { defer GinkgoRecover() - Expect(evt.MetaOld).To(Equal(pod.GetObjectMeta())) Expect(evt.ObjectOld).To(Equal(pod)) - Expect(evt.MetaNew).To(Equal(newPod.GetObjectMeta())) Expect(evt.ObjectNew).To(Equal(newPod)) return true } @@ -118,26 +110,22 @@ var _ = Describe("Predicate", func() { instance.UpdateFunc = nil Expect(instance.Update(evt)).To(BeTrue()) - close(done) }) - It("should call Delete", func(done Done) { + It("should call Delete", func() { instance := failingFuncs instance.DeleteFunc = func(evt event.DeleteEvent) bool { defer GinkgoRecover() - Expect(evt.Meta).To(Equal(pod.GetObjectMeta())) Expect(evt.Object).To(Equal(pod)) return false } evt := event.DeleteEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } Expect(instance.Delete(evt)).To(BeFalse()) instance.DeleteFunc = func(evt event.DeleteEvent) bool { defer GinkgoRecover() - Expect(evt.Meta).To(Equal(pod.GetObjectMeta())) Expect(evt.Object).To(Equal(pod)) return true } @@ -145,26 +133,22 @@ var _ = Describe("Predicate", func() { instance.DeleteFunc = nil Expect(instance.Delete(evt)).To(BeTrue()) - close(done) }) - It("should call Generic", func(done Done) { + It("should call Generic", func() { instance := failingFuncs instance.GenericFunc = func(evt event.GenericEvent) bool { defer GinkgoRecover() - 
Expect(evt.Meta).To(Equal(pod.GetObjectMeta())) Expect(evt.Object).To(Equal(pod)) return false } evt := event.GenericEvent{ Object: pod, - Meta: pod.GetObjectMeta(), } Expect(instance.Generic(evt)).To(BeFalse()) instance.GenericFunc = func(evt event.GenericEvent) bool { defer GinkgoRecover() - Expect(evt.Meta).To(Equal(pod.GetObjectMeta())) Expect(evt.Object).To(Equal(pod)) return true } @@ -172,7 +156,6 @@ var _ = Describe("Predicate", func() { instance.GenericFunc = nil Expect(instance.Generic(evt)).To(BeTrue()) - close(done) }) }) @@ -181,7 +164,7 @@ var _ = Describe("Predicate", func() { Context("Where the old object doesn't have a ResourceVersion or metadata", func() { It("should return false", func() { - new := &corev1.Pod{ + newPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "biz", @@ -189,8 +172,7 @@ var _ = Describe("Predicate", func() { }} failEvnt := event.UpdateEvent{ - MetaNew: new.GetObjectMeta(), - ObjectNew: new, + ObjectNew: newPod, } Expect(instance.Create(event.CreateEvent{})).Should(BeTrue()) Expect(instance.Delete(event.DeleteEvent{})).Should(BeTrue()) @@ -201,7 +183,7 @@ var _ = Describe("Predicate", func() { Context("Where the new object doesn't have a ResourceVersion or metadata", func() { It("should return false", func() { - old := &corev1.Pod{ + oldPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "biz", @@ -209,8 +191,7 @@ var _ = Describe("Predicate", func() { }} failEvnt := event.UpdateEvent{ - MetaOld: old.GetObjectMeta(), - ObjectOld: old, + ObjectOld: oldPod, } Expect(instance.Create(event.CreateEvent{})).Should(BeTrue()) Expect(instance.Delete(event.DeleteEvent{})).Should(BeTrue()) @@ -222,14 +203,14 @@ var _ = Describe("Predicate", func() { Context("Where the ResourceVersion hasn't changed", func() { It("should return false", func() { - new := &corev1.Pod{ + newPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "biz", ResourceVersion: "v1", }} - old := 
&corev1.Pod{ + oldPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "biz", @@ -237,10 +218,8 @@ var _ = Describe("Predicate", func() { }} failEvnt := event.UpdateEvent{ - MetaOld: old.GetObjectMeta(), - ObjectOld: old, - MetaNew: new.GetObjectMeta(), - ObjectNew: new, + ObjectOld: oldPod, + ObjectNew: newPod, } Expect(instance.Create(event.CreateEvent{})).Should(BeTrue()) Expect(instance.Delete(event.DeleteEvent{})).Should(BeTrue()) @@ -252,24 +231,22 @@ var _ = Describe("Predicate", func() { Context("Where the ResourceVersion has changed", func() { It("should return true", func() { - new := &corev1.Pod{ + newPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "biz", ResourceVersion: "v1", }} - old := &corev1.Pod{ + oldPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "biz", ResourceVersion: "v2", }} passEvt := event.UpdateEvent{ - MetaOld: old.GetObjectMeta(), - ObjectOld: old, - MetaNew: new.GetObjectMeta(), - ObjectNew: new, + ObjectOld: oldPod, + ObjectNew: newPod, } Expect(instance.Create(event.CreateEvent{})).Should(BeTrue()) Expect(instance.Delete(event.DeleteEvent{})).Should(BeTrue()) @@ -281,23 +258,23 @@ var _ = Describe("Predicate", func() { Context("Where the objects or metadata are missing", func() { It("should return false", func() { - new := &corev1.Pod{ + newPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "biz", ResourceVersion: "v1", }} - old := &corev1.Pod{ + oldPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "baz", Namespace: "biz", ResourceVersion: "v1", }} - failEvt1 := event.UpdateEvent{MetaOld: old.GetObjectMeta(), ObjectOld: old, MetaNew: new.GetObjectMeta()} - failEvt2 := event.UpdateEvent{MetaOld: old.GetObjectMeta(), MetaNew: new.GetObjectMeta(), ObjectNew: new} - failEvt3 := event.UpdateEvent{MetaOld: old.GetObjectMeta(), ObjectOld: old, ObjectNew: new} + failEvt1 := event.UpdateEvent{ObjectOld: oldPod} + failEvt2 := 
event.UpdateEvent{ObjectNew: newPod} + failEvt3 := event.UpdateEvent{ObjectOld: oldPod, ObjectNew: newPod} Expect(instance.Create(event.CreateEvent{})).Should(BeTrue()) Expect(instance.Delete(event.DeleteEvent{})).Should(BeTrue()) Expect(instance.Generic(event.GenericEvent{})).Should(BeTrue()) @@ -308,4 +285,675 @@ var _ = Describe("Predicate", func() { }) }) + + Describe("When checking a GenerationChangedPredicate", func() { + instance := predicate.GenerationChangedPredicate{} + Context("Where the old object doesn't have a Generation or metadata", func() { + It("should return false", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 1, + }} + + failEvnt := event.UpdateEvent{ + ObjectNew: newPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvnt)).To(BeFalse()) + }) + }) + + Context("Where the new object doesn't have a Generation or metadata", func() { + It("should return false", func() { + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 1, + }} + + failEvnt := event.UpdateEvent{ + ObjectOld: oldPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvnt)).To(BeFalse()) + }) + }) + + Context("Where the Generation hasn't changed", func() { + It("should return false", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 1, + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 1, + }} + + failEvnt := event.UpdateEvent{ + ObjectOld: oldPod, + ObjectNew: newPod, + } + 
Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvnt)).To(BeFalse()) + }) + }) + + Context("Where the Generation has changed", func() { + It("should return true", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 1, + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 2, + }} + passEvt := event.UpdateEvent{ + ObjectOld: oldPod, + ObjectNew: newPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(passEvt)).To(BeTrue()) + }) + }) + + Context("Where the objects or metadata are missing", func() { + + It("should return false", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 1, + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Generation: 1, + }} + + failEvt1 := event.UpdateEvent{ObjectOld: oldPod} + failEvt2 := event.UpdateEvent{ObjectNew: newPod} + failEvt3 := event.UpdateEvent{ObjectOld: oldPod, ObjectNew: newPod} + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvt1)).To(BeFalse()) + Expect(instance.Update(failEvt2)).To(BeFalse()) + Expect(instance.Update(failEvt3)).To(BeFalse()) + }) + }) + + }) + + // AnnotationChangedPredicate has almost identical test cases as LabelChangedPredicates, + // so the duplication linter should be muted on both two test suites. 
+ Describe("When checking an AnnotationChangedPredicate", func() { + instance := predicate.AnnotationChangedPredicate{} + Context("Where the old object is missing", func() { + It("should return false", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + }, + }} + + failEvnt := event.UpdateEvent{ + ObjectNew: newPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvnt)).To(BeFalse()) + }) + }) + + Context("Where the new object is missing", func() { + It("should return false", func() { + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + }, + }} + + failEvnt := event.UpdateEvent{ + ObjectOld: oldPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvnt)).To(BeFalse()) + }) + }) + + Context("Where the annotations are empty", func() { + It("should return false", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + }} + + failEvnt := event.UpdateEvent{ + ObjectOld: oldPod, + ObjectNew: newPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvnt)).To(BeFalse()) + }) + }) + + Context("Where the annotations haven't changed", func() { + It("should return false", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + }, + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + }, + }} + + failEvnt := event.UpdateEvent{ + ObjectOld: oldPod, + ObjectNew: newPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(failEvnt)).To(BeFalse()) + }) + }) + + Context("Where an annotation value has changed", func() { + It("should return true", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + }, + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "weez", + }, + }} + + passEvt := event.UpdateEvent{ + ObjectOld: oldPod, + ObjectNew: newPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(passEvt)).To(BeTrue()) + }) + }) + + Context("Where an annotation has been added", func() { + It("should return true", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + }, + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + "zooz": "qooz", + }, + }} + + passEvt := event.UpdateEvent{ + ObjectOld: oldPod, + ObjectNew: newPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + 
Expect(instance.Update(passEvt)).To(BeTrue()) + }) + }) + + Context("Where an annotation has been removed", func() { + It("should return true", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + "zooz": "qooz", + }, + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Annotations: map[string]string{ + "booz": "wooz", + }, + }} + + passEvt := event.UpdateEvent{ + ObjectOld: oldPod, + ObjectNew: newPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(passEvt)).To(BeTrue()) + }) + }) + }) + + // LabelChangedPredicates has almost identical test cases as AnnotationChangedPredicates, + // so the duplication linter should be muted on both two test suites. + Describe("When checking a LabelChangedPredicate", func() { + instance := predicate.LabelChangedPredicate{} + Context("Where the old object is missing", func() { + It("should return false", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + }, + }} + + evt := event.UpdateEvent{ + ObjectNew: newPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(evt)).To(BeFalse()) + }) + }) + + Context("Where the new object is missing", func() { + It("should return false", func() { + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + }, + }} + + evt := event.UpdateEvent{ + ObjectOld: oldPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + 
Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(evt)).To(BeFalse()) + }) + }) + + Context("Where the labels are empty", func() { + It("should return false", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + }} + + evt := event.UpdateEvent{ + ObjectOld: oldPod, + ObjectNew: newPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(evt)).To(BeFalse()) + }) + }) + + Context("Where the labels haven't changed", func() { + It("should return false", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + }, + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + }, + }} + + evt := event.UpdateEvent{ + ObjectOld: oldPod, + ObjectNew: newPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(evt)).To(BeFalse()) + }) + }) + + Context("Where a label value has changed", func() { + It("should return true", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + }, + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bee", + }, + }} + + evt := event.UpdateEvent{ + ObjectOld: oldPod, + ObjectNew: newPod, + } + 
Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(evt)).To(BeTrue()) + }) + }) + + Context("Where a label has been added", func() { + It("should return true", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + }, + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + "faa": "bor", + }, + }} + + evt := event.UpdateEvent{ + ObjectOld: oldPod, + ObjectNew: newPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(evt)).To(BeTrue()) + }) + }) + + Context("Where a label has been removed", func() { + It("should return true", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + "faa": "bor", + }, + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + Labels: map[string]string{ + "foo": "bar", + }, + }} + + evt := event.UpdateEvent{ + ObjectOld: oldPod, + ObjectNew: newPod, + } + Expect(instance.Create(event.CreateEvent{})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{})).To(BeTrue()) + Expect(instance.Update(evt)).To(BeTrue()) + }) + }) + }) + + Context("With a boolean predicate", func() { + funcs := func(pass bool) predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(event.CreateEvent) bool { + return pass + }, + DeleteFunc: func(event.DeleteEvent) bool { + return pass + }, + UpdateFunc: func(event.UpdateEvent) bool { + return pass + }, + GenericFunc: 
func(event.GenericEvent) bool { + return pass + }, + } + } + passFuncs := funcs(true) + failFuncs := funcs(false) + + Describe("When checking an And predicate", func() { + It("should return false when one of its predicates returns false", func() { + a := predicate.And(passFuncs, failFuncs) + Expect(a.Create(event.CreateEvent{})).To(BeFalse()) + Expect(a.Update(event.UpdateEvent{})).To(BeFalse()) + Expect(a.Delete(event.DeleteEvent{})).To(BeFalse()) + Expect(a.Generic(event.GenericEvent{})).To(BeFalse()) + }) + It("should return true when all of its predicates return true", func() { + a := predicate.And(passFuncs, passFuncs) + Expect(a.Create(event.CreateEvent{})).To(BeTrue()) + Expect(a.Update(event.UpdateEvent{})).To(BeTrue()) + Expect(a.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(a.Generic(event.GenericEvent{})).To(BeTrue()) + }) + }) + Describe("When checking an Or predicate", func() { + It("should return true when one of its predicates returns true", func() { + o := predicate.Or(passFuncs, failFuncs) + Expect(o.Create(event.CreateEvent{})).To(BeTrue()) + Expect(o.Update(event.UpdateEvent{})).To(BeTrue()) + Expect(o.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(o.Generic(event.GenericEvent{})).To(BeTrue()) + }) + It("should return false when all of its predicates return false", func() { + o := predicate.Or(failFuncs, failFuncs) + Expect(o.Create(event.CreateEvent{})).To(BeFalse()) + Expect(o.Update(event.UpdateEvent{})).To(BeFalse()) + Expect(o.Delete(event.DeleteEvent{})).To(BeFalse()) + Expect(o.Generic(event.GenericEvent{})).To(BeFalse()) + }) + }) + Describe("When checking a Not predicate", func() { + It("should return false when its predicate returns true", func() { + n := predicate.Not(passFuncs) + Expect(n.Create(event.CreateEvent{})).To(BeFalse()) + Expect(n.Update(event.UpdateEvent{})).To(BeFalse()) + Expect(n.Delete(event.DeleteEvent{})).To(BeFalse()) + Expect(n.Generic(event.GenericEvent{})).To(BeFalse()) + }) + It("should return true when 
its predicate returns false", func() { + n := predicate.Not(failFuncs) + Expect(n.Create(event.CreateEvent{})).To(BeTrue()) + Expect(n.Update(event.UpdateEvent{})).To(BeTrue()) + Expect(n.Delete(event.DeleteEvent{})).To(BeTrue()) + Expect(n.Generic(event.GenericEvent{})).To(BeTrue()) + }) + }) + }) + + Describe("NewPredicateFuncs with a namespace filter function", func() { + byNamespaceFilter := func(namespace string) func(object client.Object) bool { + return func(object client.Object) bool { + return object.GetNamespace() == namespace + } + } + byNamespaceFuncs := predicate.NewPredicateFuncs(byNamespaceFilter("biz")) + Context("Where the namespace is matching", func() { + It("should return true", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + }} + passEvt1 := event.UpdateEvent{ObjectOld: oldPod, ObjectNew: newPod} + Expect(byNamespaceFuncs.Create(event.CreateEvent{Object: newPod})).To(BeTrue()) + Expect(byNamespaceFuncs.Delete(event.DeleteEvent{Object: oldPod})).To(BeTrue()) + Expect(byNamespaceFuncs.Generic(event.GenericEvent{Object: newPod})).To(BeTrue()) + Expect(byNamespaceFuncs.Update(passEvt1)).To(BeTrue()) + }) + }) + + Context("Where the namespace is not matching", func() { + It("should return false", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "bizz", + }} + + oldPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baz", + Namespace: "biz", + }} + failEvt1 := event.UpdateEvent{ObjectOld: oldPod, ObjectNew: newPod} + Expect(byNamespaceFuncs.Create(event.CreateEvent{Object: newPod})).To(BeFalse()) + Expect(byNamespaceFuncs.Delete(event.DeleteEvent{Object: newPod})).To(BeFalse()) + Expect(byNamespaceFuncs.Generic(event.GenericEvent{Object: newPod})).To(BeFalse()) + Expect(byNamespaceFuncs.Update(failEvt1)).To(BeFalse()) + }) + }) + }) + + 
Describe("When checking a LabelSelectorPredicate", func() { + instance, err := predicate.LabelSelectorPredicate(metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}) + if err != nil { + Fail("Improper Label Selector passed during predicate instantiation.") + } + + Context("When the Selector does not match the event labels", func() { + It("should return false", func() { + failMatch := &corev1.Pod{} + Expect(instance.Create(event.CreateEvent{Object: failMatch})).To(BeFalse()) + Expect(instance.Delete(event.DeleteEvent{Object: failMatch})).To(BeFalse()) + Expect(instance.Generic(event.GenericEvent{Object: failMatch})).To(BeFalse()) + Expect(instance.Update(event.UpdateEvent{ObjectNew: failMatch})).To(BeFalse()) + }) + }) + + Context("When the Selector matches the event labels", func() { + It("should return true", func() { + successMatch := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + }, + } + Expect(instance.Create(event.CreateEvent{Object: successMatch})).To(BeTrue()) + Expect(instance.Delete(event.DeleteEvent{Object: successMatch})).To(BeTrue()) + Expect(instance.Generic(event.GenericEvent{Object: successMatch})).To(BeTrue()) + Expect(instance.Update(event.UpdateEvent{ObjectNew: successMatch})).To(BeTrue()) + }) + }) + }) }) diff --git a/pkg/reconcile/example_test.go b/pkg/reconcile/example_test.go index 55e0e976c9..2b799df90d 100644 --- a/pkg/reconcile/example_test.go +++ b/pkg/reconcile/example_test.go @@ -17,6 +17,7 @@ limitations under the License. package reconcile_test import ( + "context" "fmt" "time" @@ -26,15 +27,13 @@ import ( // This example implements a simple no-op reconcile function that prints the object to be Reconciled. 
func ExampleFunc() { - type Reconciler struct{} - - r := reconcile.Func(func(o reconcile.Request) (reconcile.Result, error) { + r := reconcile.Func(func(_ context.Context, o reconcile.Request) (reconcile.Result, error) { // Create your business logic to create, update, delete objects here. fmt.Printf("Name: %s, Namespace: %s", o.Name, o.Namespace) return reconcile.Result{}, nil }) - res, err := r.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "test"}}) + res, err := r.Reconcile(context.Background(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "test"}}) if err != nil || res.Requeue || res.RequeueAfter != time.Duration(0) { fmt.Printf("got requeue request: %v, %v\n", err, res) } diff --git a/pkg/reconcile/reconcile.go b/pkg/reconcile/reconcile.go index fbdec10870..88303ae781 100644 --- a/pkg/reconcile/reconcile.go +++ b/pkg/reconcile/reconcile.go @@ -17,19 +17,46 @@ limitations under the License. package reconcile import ( + "context" + "errors" + "reflect" "time" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" ) // Result contains the result of a Reconciler invocation. type Result struct { - // Requeue tells the Controller to requeue the reconcile key. Defaults to false. + // Requeue tells the Controller to perform a ratelimited requeue + // using the workqueues ratelimiter. Defaults to false. + // + // This setting is deprecated as it causes confusion and there is + // no good reason to use it. When waiting for an external event to + // happen, either the duration until it is supposed to happen or an + // appropriate poll interval should be used, rather than an + // interval emitted by a ratelimiter whose purpose it is to control + // retry on error. + // + // Deprecated: Use `RequeueAfter` instead. Requeue bool // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. 
// Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. RequeueAfter time.Duration + + // Priority is the priority that will be used if the item gets re-enqueued (also if an error is returned). + // If Priority is not set the original Priority of the request is preserved. + // Note: Priority is only respected if the controller is using a priorityqueue.PriorityQueue. + Priority *int +} + +// IsZero returns true if this result is empty. +func (r *Result) IsZero() bool { + if r == nil { + return true + } + return *r == Result{} } // Request contains the information necessary to reconcile a Kubernetes object. This includes the @@ -52,24 +79,24 @@ Deleting Kubernetes objects) or external Events (GitHub Webhooks, polling extern Example reconcile Logic: - * Reader an object and all the Pods it owns. - * Observe that the object spec specifies 5 replicas but actual cluster contains only 1 Pod replica. - * Create 4 Pods and set their OwnerReferences to the object. +* Read an object and all the Pods it owns. +* Observe that the object spec specifies 5 replicas but actual cluster contains only 1 Pod replica. +* Create 4 Pods and set their OwnerReferences to the object. 
reconcile may be implemented as either a type: - type reconcile struct {} + type reconciler struct {} - func (reconcile) reconcile(controller.Request) (controller.Result, error) { + func (reconciler) Reconcile(ctx context.Context, o reconcile.Request) (reconcile.Result, error) { // Implement business logic of reading and writing objects here - return controller.Result{}, nil + return reconcile.Result{}, nil } Or as a function: - controller.Func(func(o controller.Request) (controller.Result, error) { + reconcile.Func(func(ctx context.Context, o reconcile.Request) (reconcile.Result, error) { // Implement business logic of reading and writing objects here - return controller.Result{}, nil + return reconcile.Result{}, nil }) Reconciliation is level-based, meaning action isn't driven off changes in individual Events, but instead is @@ -77,17 +104,94 @@ driven by actual cluster state read from the apiserver or a local cache. For example if responding to a Pod Delete Event, the Request won't contain that a Pod was deleted, instead the reconcile function observes this when reading the cluster state and seeing the Pod as missing. */ -type Reconciler interface { - // Reconciler performs a full reconciliation for the object referred to by the Request. - // The Controller will requeue the Request to be processed again if an error is non-nil or - // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. - Reconcile(Request) (Result, error) +type Reconciler = TypedReconciler[Request] + +// TypedReconciler implements an API for a specific Resource by Creating, Updating or Deleting Kubernetes +// objects, or by making changes to systems external to the cluster (e.g. cloudproviders, github, etc). +// +// The request type is what event handlers put into the workqueue. The workqueue then de-duplicates identical +// requests. 
+type TypedReconciler[request comparable] interface { + // Reconcile performs a full reconciliation for the object referred to by the Request. + // + // If the returned error is non-nil, the Result is ignored and the request will be + // requeued using exponential backoff. The only exception is if the error is a + // TerminalError in which case no requeuing happens. + // + // If the error is nil and the returned Result has a non-zero result.RequeueAfter, the request + // will be requeued after the specified duration. + // + // If the error is nil and result.RequeueAfter is zero and result.Requeue is true, the request + // will be requeued using exponential backoff. + Reconcile(context.Context, request) (Result, error) } // Func is a function that implements the reconcile interface. -type Func func(Request) (Result, error) +type Func = TypedFunc[Request] + +// TypedFunc is a function that implements the reconcile interface. +type TypedFunc[request comparable] func(context.Context, request) (Result, error) var _ Reconciler = Func(nil) // Reconcile implements Reconciler. -func (r Func) Reconcile(o Request) (Result, error) { return r(o) } +func (r TypedFunc[request]) Reconcile(ctx context.Context, req request) (Result, error) { + return r(ctx, req) +} + +// ObjectReconciler is a specialized version of Reconciler that acts on instances of client.Object. Each reconciliation +// event gets the associated object from Kubernetes before passing it to Reconcile. An ObjectReconciler can be used in +// Builder.Complete by calling AsReconciler. See Reconciler for more details. +type ObjectReconciler[object client.Object] interface { + Reconcile(context.Context, object) (Result, error) +} + +// AsReconciler creates a Reconciler based on the given ObjectReconciler. 
+func AsReconciler[object client.Object](client client.Client, rec ObjectReconciler[object]) Reconciler { + return &objectReconcilerAdapter[object]{ + objReconciler: rec, + client: client, + } +} + +type objectReconcilerAdapter[object client.Object] struct { + objReconciler ObjectReconciler[object] + client client.Client +} + +// Reconcile implements Reconciler. +func (a *objectReconcilerAdapter[object]) Reconcile(ctx context.Context, req Request) (Result, error) { + o := reflect.New(reflect.TypeOf(*new(object)).Elem()).Interface().(object) + if err := a.client.Get(ctx, req.NamespacedName, o); err != nil { + return Result{}, client.IgnoreNotFound(err) + } + + return a.objReconciler.Reconcile(ctx, o) +} + +// TerminalError is an error that will not be retried but still be logged +// and recorded in metrics. +func TerminalError(wrapped error) error { + return &terminalError{err: wrapped} +} + +type terminalError struct { + err error +} + +// Unwrap returns nil if te.err is nil. +func (te *terminalError) Unwrap() error { + return te.err +} + +func (te *terminalError) Error() string { + if te.err == nil { + return "nil terminal error" + } + return "terminal error: " + te.err.Error() +} + +func (te *terminalError) Is(target error) bool { + tp := &terminalError{} + return errors.As(target, &tp) +} diff --git a/pkg/reconcile/reconcile_suite_test.go b/pkg/reconcile/reconcile_suite_test.go index f9fbba4b32..9bab444ebd 100644 --- a/pkg/reconcile/reconcile_suite_test.go +++ b/pkg/reconcile/reconcile_suite_test.go @@ -19,18 +19,17 @@ package reconcile_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) func TestReconcile(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "reconcile Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Reconcile Suite") } var _ = BeforeSuite(func() { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) }) diff --git a/pkg/reconcile/reconcile_test.go b/pkg/reconcile/reconcile_test.go index 120f2789f4..bb5644b87c 100644 --- a/pkg/reconcile/reconcile_test.go +++ b/pkg/reconcile/reconcile_test.go @@ -17,17 +17,53 @@ limitations under the License. package reconcile_test import ( + "context" "fmt" + "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) +type mockObjectReconciler struct { + reconcileFunc func(context.Context, *corev1.ConfigMap) (reconcile.Result, error) +} + +func (r *mockObjectReconciler) Reconcile(ctx context.Context, cm *corev1.ConfigMap) (reconcile.Result, error) { + return r.reconcileFunc(ctx, cm) +} + var _ = Describe("reconcile", func() { + Describe("Result", func() { + It("IsZero should return true if empty", func() { + var res *reconcile.Result + Expect(res.IsZero()).To(BeTrue()) + res2 := &reconcile.Result{} + Expect(res2.IsZero()).To(BeTrue()) + res3 := reconcile.Result{} + Expect(res3.IsZero()).To(BeTrue()) + }) + + It("IsZero should return false if Requeue is set to true", func() { + res := reconcile.Result{Requeue: true} + 
Expect(res.IsZero()).To(BeFalse()) + }) + + It("IsZero should return false if RequeueAfter is set to true", func() { + res := reconcile.Result{RequeueAfter: 1 * time.Second} + Expect(res.IsZero()).To(BeFalse()) + }) + }) + Describe("Func", func() { - It("should call the function with the request and return a nil error.", func() { + It("should call the function with the request and return a nil error.", func(ctx SpecContext) { request := reconcile.Request{ NamespacedName: types.NamespacedName{Name: "foo", Namespace: "bar"}, } @@ -35,18 +71,18 @@ var _ = Describe("reconcile", func() { Requeue: true, } - instance := reconcile.Func(func(r reconcile.Request) (reconcile.Result, error) { + instance := reconcile.Func(func(_ context.Context, r reconcile.Request) (reconcile.Result, error) { defer GinkgoRecover() Expect(r).To(Equal(request)) return result, nil }) - actualResult, actualErr := instance.Reconcile(request) + actualResult, actualErr := instance.Reconcile(ctx, request) Expect(actualResult).To(Equal(result)) Expect(actualErr).NotTo(HaveOccurred()) }) - It("should call the function with the request and return an error.", func() { + It("should call the function with the request and return an error.", func(ctx SpecContext) { request := reconcile.Request{ NamespacedName: types.NamespacedName{Name: "foo", Namespace: "bar"}, } @@ -55,15 +91,98 @@ var _ = Describe("reconcile", func() { } err := fmt.Errorf("hello world") - instance := reconcile.Func(func(r reconcile.Request) (reconcile.Result, error) { + instance := reconcile.Func(func(_ context.Context, r reconcile.Request) (reconcile.Result, error) { defer GinkgoRecover() Expect(r).To(Equal(request)) return result, err }) - actualResult, actualErr := instance.Reconcile(request) + actualResult, actualErr := instance.Reconcile(ctx, request) Expect(actualResult).To(Equal(result)) Expect(actualErr).To(Equal(err)) }) + + It("should allow unwrapping inner error from terminal error", func() { + inner := apierrors.NewGone("") + 
terminalError := reconcile.TerminalError(inner) + + Expect(apierrors.IsGone(terminalError)).To(BeTrue()) + }) + + It("should handle nil terminal errors properly", func() { + err := reconcile.TerminalError(nil) + Expect(err.Error()).To(Equal("nil terminal error")) + }) + }) + + Describe("AsReconciler", func() { + var testenv *envtest.Environment + var testClient client.Client + + BeforeEach(func() { + testenv = &envtest.Environment{} + + cfg, err := testenv.Start() + Expect(err).NotTo(HaveOccurred()) + + testClient, err = client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + Expect(testenv.Stop()).NotTo(HaveOccurred()) + }) + + Context("with an existing object", func() { + var key client.ObjectKey + + BeforeEach(func(ctx SpecContext) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "test", + }, + } + key = client.ObjectKeyFromObject(cm) + + err := testClient.Create(ctx, cm) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should Get the object and call the ObjectReconciler", func(ctx SpecContext) { + var actual *corev1.ConfigMap + reconciler := reconcile.AsReconciler(testClient, &mockObjectReconciler{ + reconcileFunc: func(ctx context.Context, cm *corev1.ConfigMap) (reconcile.Result, error) { + actual = cm + return reconcile.Result{}, nil + }, + }) + + res, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: key}) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(BeZero()) + Expect(actual).NotTo(BeNil()) + Expect(actual.ObjectMeta.Name).To(Equal(key.Name)) + Expect(actual.ObjectMeta.Namespace).To(Equal(key.Namespace)) + }) + }) + + Context("with an object that doesn't exist", func() { + It("should not call the ObjectReconciler", func(ctx SpecContext) { + called := false + reconciler := reconcile.AsReconciler(testClient, &mockObjectReconciler{ + reconcileFunc: func(ctx context.Context, cm *corev1.ConfigMap) (reconcile.Result, error) { + called = true + return 
reconcile.Result{}, nil + }, + }) + + key := types.NamespacedName{Namespace: "default", Name: "fake-obj"} + res, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: key}) + Expect(err).NotTo(HaveOccurred()) + Expect(res).To(BeZero()) + Expect(called).To(BeFalse()) + }) + }) }) }) diff --git a/pkg/reconcile/reconciletest/reconcile.go b/pkg/reconcile/reconciletest/reconcile.go deleted file mode 100644 index 1d147bc536..0000000000 --- a/pkg/reconcile/reconciletest/reconcile.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package reconciletest - -import "sigs.k8s.io/controller-runtime/pkg/reconcile" - -var _ reconcile.Reconciler = &FakeReconcile{} - -// FakeReconcile implements reconcile.Reconciler by always returning Result and Err -type FakeReconcile struct { - // Result is the result that will be returned by Reconciler - Result reconcile.Result - - // Err is the error that will be returned by Reconciler - Err error - - // If specified, Reconciler will write Requests to Chan - Chan chan reconcile.Request -} - -// Reconcile implements reconcile.Reconciler -func (f *FakeReconcile) Reconcile(r reconcile.Request) (reconcile.Result, error) { - if f.Chan != nil { - f.Chan <- r - } - return f.Result, f.Err -} diff --git a/pkg/recorder/example_test.go b/pkg/recorder/example_test.go index c900423d77..969420d817 100644 --- a/pkg/recorder/example_test.go +++ b/pkg/recorder/example_test.go @@ -17,11 +17,9 @@ limitations under the License. package recorder_test import ( - "time" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + _ "github.com/onsi/ginkgo/v2" "sigs.k8s.io/controller-runtime/pkg/recorder" ) @@ -48,13 +46,3 @@ func Example_eventf() { recorder.Eventf(somePod, corev1.EventTypeNormal, "DislikesCheese", "Not even %s?", mildCheese) } - -func Example_pastEventf() { - // recorderProvider is a recorder.Provider - recorder := recorderProvider.GetEventRecorderFor("my-controller") - - // emit a backdated event (potentially with variable message) - recorder.PastEventf(somePod, metav1.Time{Time: time.Now().Add(-5 * time.Minute)}, - corev1.EventTypeWarning, "ForgottenCrackers", - "Crackers, Gromit! We forgot the crackers!") -} diff --git a/pkg/runtime/inject/inject.go b/pkg/runtime/inject/inject.go deleted file mode 100644 index b47a91d042..0000000000 --- a/pkg/runtime/inject/inject.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package inject - -import ( - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// Cache is used by the ControllerManager to inject Cache into Sources, EventHandlers, Predicates, and -// Reconciles -type Cache interface { - InjectCache(cache cache.Cache) error -} - -// CacheInto will set informers on i and return the result if it implements Cache. Returns -//// false if i does not implement Cache. -func CacheInto(c cache.Cache, i interface{}) (bool, error) { - if s, ok := i.(Cache); ok { - return true, s.InjectCache(c) - } - return false, nil -} - -// APIReader is used by the Manager to inject the APIReader into necessary types. -type APIReader interface { - InjectAPIReader(client.Reader) error -} - -// APIReaderInto will set APIReader on i and return the result if it implements APIReaderInto. 
-// Returns false if i does not implement APIReader -func APIReaderInto(reader client.Reader, i interface{}) (bool, error) { - if s, ok := i.(APIReader); ok { - return true, s.InjectAPIReader(reader) - } - return false, nil -} - -// Config is used by the ControllerManager to inject Config into Sources, EventHandlers, Predicates, and -// Reconciles -type Config interface { - InjectConfig(*rest.Config) error -} - -// ConfigInto will set config on i and return the result if it implements Config. Returns -//// false if i does not implement Config. -func ConfigInto(config *rest.Config, i interface{}) (bool, error) { - if s, ok := i.(Config); ok { - return true, s.InjectConfig(config) - } - return false, nil -} - -// Client is used by the ControllerManager to inject client into Sources, EventHandlers, Predicates, and -// Reconciles -type Client interface { - InjectClient(client.Client) error -} - -// ClientInto will set client on i and return the result if it implements Client. Returns -// false if i does not implement Client. -func ClientInto(client client.Client, i interface{}) (bool, error) { - if s, ok := i.(Client); ok { - return true, s.InjectClient(client) - } - return false, nil -} - -// Scheme is used by the ControllerManager to inject Scheme into Sources, EventHandlers, Predicates, and -// Reconciles -type Scheme interface { - InjectScheme(scheme *runtime.Scheme) error -} - -// SchemeInto will set scheme and return the result on i if it implements Scheme. Returns -// false if i does not implement Scheme. -func SchemeInto(scheme *runtime.Scheme, i interface{}) (bool, error) { - if is, ok := i.(Scheme); ok { - return true, is.InjectScheme(scheme) - } - return false, nil -} - -// Stoppable is used by the ControllerManager to inject stop channel into Sources, -// EventHandlers, Predicates, and Reconciles. 
-type Stoppable interface { - InjectStopChannel(<-chan struct{}) error -} - -// StopChannelInto will set stop channel on i and return the result if it implements Stoppable. -// Returns false if i does not implement Stoppable. -func StopChannelInto(stop <-chan struct{}, i interface{}) (bool, error) { - if s, ok := i.(Stoppable); ok { - return true, s.InjectStopChannel(stop) - } - return false, nil -} - -// Mapper is used to inject the rest mapper to components that may need it -type Mapper interface { - InjectMapper(meta.RESTMapper) error -} - -// MapperInto will set the rest mapper on i and return the result if it implements Mapper. -// Returns false if i does not implement Mapper. -func MapperInto(mapper meta.RESTMapper, i interface{}) (bool, error) { - if m, ok := i.(Mapper); ok { - return true, m.InjectMapper(mapper) - } - return false, nil -} - -// Func injects dependencies into i. -type Func func(i interface{}) error - -// Injector is used by the ControllerManager to inject Func into Controllers -type Injector interface { - InjectFunc(f Func) error -} - -// InjectorInto will set f and return the result on i if it implements Injector. Returns -// false if i does not implement Injector. -func InjectorInto(f Func, i interface{}) (bool, error) { - if ii, ok := i.(Injector); ok { - return true, ii.InjectFunc(f) - } - return false, nil -} - -// Logger is used to inject Loggers into components that need them -// and don't otherwise have opinions. -type Logger interface { - InjectLogger(l logr.Logger) error -} - -// LoggerInto will set the logger on the given object if it implements inject.Logger, -// returning true if a InjectLogger was called, and false otherwise. 
-func LoggerInto(l logr.Logger, i interface{}) (bool, error) { - if injectable, wantsLogger := i.(Logger); wantsLogger { - return true, injectable.InjectLogger(l) - } - return false, nil -} diff --git a/pkg/runtime/inject/inject_test.go b/pkg/runtime/inject/inject_test.go deleted file mode 100644 index 6f96ab5f63..0000000000 --- a/pkg/runtime/inject/inject_test.go +++ /dev/null @@ -1,328 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package inject - -import ( - "fmt" - "reflect" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/cache/informertest" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -var instance *testSource -var uninjectable *failSource -var errInjectFail = fmt.Errorf("injection fails") -var expectedFalse = false - -var _ = Describe("runtime inject", func() { - - BeforeEach(func() { - instance = &testSource{} - uninjectable = &failSource{} - }) - - It("should set informers", func() { - injectedCache := &informertest.FakeInformers{} - - By("Validating injecting the informer") - res, err := CacheInto(injectedCache, instance) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(true)) - Expect(injectedCache).To(Equal(instance.GetCache())) - - By("Returing false if the type does not implement inject.Cache") - res, err = CacheInto(injectedCache, uninjectable) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(expectedFalse)) - Expect(uninjectable.GetCache()).To(BeNil()) - - By("Returning an error if informer injection fails") - res, err = CacheInto(nil, instance) - Expect(err).To(Equal(errInjectFail)) - Expect(res).To(Equal(true)) - - }) - - It("should set config", func() { - - cfg := &rest.Config{} - - By("Validating injecting config") - res, err := ConfigInto(cfg, instance) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(true)) - Expect(cfg).To(Equal(instance.GetConfig())) - - By("Returning false if the type does not implement inject.Config") - res, err = ConfigInto(cfg, uninjectable) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(false)) - Expect(uninjectable.GetConfig()).To(BeNil()) - - By("Returning an error if config injection fails") - res, err = ConfigInto(nil, instance) - Expect(err).To(Equal(errInjectFail)) - Expect(res).To(Equal(true)) - }) - - It("should set client", func() { - client := client.DelegatingClient{} - - By("Validating injecting 
client") - res, err := ClientInto(client, instance) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(true)) - Expect(client).To(Equal(instance.GetClient())) - - By("Returning false if the type does not implement inject.Client") - res, err = ClientInto(client, uninjectable) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(false)) - Expect(uninjectable.GetClient()).To(BeNil()) - - By("Returning an error if client injection fails") - res, err = ClientInto(nil, instance) - Expect(err).To(Equal(errInjectFail)) - Expect(res).To(Equal(true)) - }) - - It("should set scheme", func() { - - scheme := runtime.NewScheme() - - By("Validating injecting scheme") - res, err := SchemeInto(scheme, instance) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(true)) - Expect(scheme).To(Equal(instance.GetScheme())) - - By("Returning false if the type does not implement inject.Scheme") - res, err = SchemeInto(scheme, uninjectable) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(false)) - Expect(uninjectable.GetScheme()).To(BeNil()) - - By("Returning an error if scheme injection fails") - res, err = SchemeInto(nil, instance) - Expect(err).To(Equal(errInjectFail)) - Expect(res).To(Equal(true)) - }) - - It("should set stop channel", func() { - - stop := make(<-chan struct{}) - - By("Validating injecting stop channel") - res, err := StopChannelInto(stop, instance) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(true)) - Expect(stop).To(Equal(instance.GetStop())) - - By("Returning false if the type does not implement inject.Stoppable") - res, err = StopChannelInto(stop, uninjectable) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(false)) - Expect(uninjectable.GetStop()).To(BeNil()) - - By("Returning an error if stop channel injection fails") - res, err = StopChannelInto(nil, instance) - Expect(err).To(Equal(errInjectFail)) - Expect(res).To(Equal(true)) - }) - - It("should set api reader", func() { - apiReader := 
&client.DelegatingReader{} - - By("Validating injecting client") - res, err := APIReaderInto(apiReader, instance) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(true)) - Expect(apiReader).To(Equal(instance.GetAPIReader())) - - By("Returning false if the type does not implement inject.Client") - res, err = APIReaderInto(apiReader, uninjectable) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(false)) - Expect(uninjectable.GetAPIReader()).To(BeNil()) - - By("Returning an error if client injection fails") - res, err = APIReaderInto(nil, instance) - Expect(err).To(Equal(errInjectFail)) - Expect(res).To(Equal(true)) - }) - - It("should set dependencies", func() { - - f := func(interface{}) error { return nil } - - By("Validating injecting dependencies") - res, err := InjectorInto(f, instance) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(true)) - Expect(reflect.ValueOf(f).Pointer()).To(Equal(reflect.ValueOf(instance.GetFunc()).Pointer())) - - By("Returning false if the type does not implement inject.Injector") - res, err = InjectorInto(f, uninjectable) - Expect(err).NotTo(HaveOccurred()) - Expect(res).To(Equal(false)) - Expect(uninjectable.GetFunc()).To(BeNil()) - - By("Returning an error if dependencies injection fails") - res, err = InjectorInto(nil, instance) - Expect(err).To(Equal(errInjectFail)) - Expect(res).To(Equal(true)) - }) - -}) - -type testSource struct { - scheme *runtime.Scheme - cache cache.Cache - config *rest.Config - client client.Client - apiReader client.Reader - f Func - stop <-chan struct{} -} - -func (s *testSource) InjectCache(c cache.Cache) error { - if c != nil { - s.cache = c - return nil - } - return fmt.Errorf("injection fails") -} - -func (s *testSource) InjectConfig(config *rest.Config) error { - if config != nil { - s.config = config - return nil - } - return fmt.Errorf("injection fails") -} - -func (s *testSource) InjectClient(client client.Client) error { - if client != nil { - s.client = client - 
return nil - } - return fmt.Errorf("injection fails") -} - -func (s *testSource) InjectScheme(scheme *runtime.Scheme) error { - if scheme != nil { - s.scheme = scheme - return nil - } - return fmt.Errorf("injection fails") -} - -func (s *testSource) InjectStopChannel(stop <-chan struct{}) error { - if stop != nil { - s.stop = stop - return nil - } - return fmt.Errorf("injection fails") -} - -func (s *testSource) InjectAPIReader(reader client.Reader) error { - if reader != nil { - s.apiReader = reader - return nil - } - return fmt.Errorf("injection fails") -} - -func (s *testSource) InjectFunc(f Func) error { - if f != nil { - s.f = f - return nil - } - return fmt.Errorf("injection fails") -} - -func (s *testSource) GetCache() cache.Cache { - return s.cache -} - -func (s *testSource) GetConfig() *rest.Config { - return s.config -} - -func (s *testSource) GetScheme() *runtime.Scheme { - return s.scheme -} - -func (s *testSource) GetClient() client.Client { - return s.client -} - -func (s *testSource) GetAPIReader() client.Reader { - return s.apiReader -} - -func (s *testSource) GetFunc() Func { - return s.f -} - -func (s *testSource) GetStop() <-chan struct{} { - return s.stop -} - -type failSource struct { - scheme *runtime.Scheme - cache cache.Cache - config *rest.Config - client client.Client - apiReader client.Reader - f Func - stop <-chan struct{} -} - -func (s *failSource) GetCache() cache.Cache { - return s.cache -} - -func (s *failSource) GetConfig() *rest.Config { - return s.config -} - -func (s *failSource) GetScheme() *runtime.Scheme { - return s.scheme -} - -func (s *failSource) GetClient() client.Client { - return s.client -} - -func (s *failSource) GetAPIReader() client.Reader { - return s.apiReader -} - -func (s *failSource) GetFunc() Func { - return s.f -} - -func (s *failSource) GetStop() <-chan struct{} { - return s.stop -} diff --git a/pkg/runtime/log/log.go b/pkg/runtime/log/log.go deleted file mode 100644 index c5ec5a2393..0000000000 --- 
a/pkg/runtime/log/log.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package log contains (deprecated) utilities for fetching a new logger when -// one is not already available. -// -// Deprecated: use pkg/log -package log - -import ( - "github.com/go-logr/logr" - - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" -) - -var ( - // ZapLogger is a Logger implementation. - // If development is true, a Zap development config will be used - // (stacktraces on warnings, no sampling), otherwise a Zap production - // config will be used (stacktraces on errors, sampling). - ZapLogger = zap.Logger - - // ZapLoggerTo returns a new Logger implementation using Zap which logs - // to the given destination, instead of stderr. It otherise behaves like - // ZapLogger. - ZapLoggerTo = zap.Logger - - // SetLogger sets a concrete logging implementation for all deferred Loggers. - SetLogger = log.SetLogger - - // Log is the base logger used by kubebuilder. It delegates - // to another logr.Logger. You *must* call SetLogger to - // get any actual logging. - Log = log.Log - - // KBLog is a base parent logger for use inside controller-runtime. 
- // Deprecated: don't use this outside controller-runtime - // (inside CR, use pkg/internal/log.RuntimeLog) - KBLog logr.Logger -) - -func init() { - KBLog = log.Log.WithName("controller-runtime") -} diff --git a/pkg/runtime/signals/signal.go b/pkg/runtime/signals/signal.go deleted file mode 100644 index 64bbcef368..0000000000 --- a/pkg/runtime/signals/signal.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package signals contains libraries for handling signals to gracefully -// shutdown the manager in combination with Kubernetes pod graceful termination -// policy. -// -// Deprecated: use pkg/manager/signals instead. -package signals - -import ( - "sigs.k8s.io/controller-runtime/pkg/manager/signals" -) - -var ( - // SetupSignalHandler registers for SIGTERM and SIGINT. A stop channel is returned - // which is closed on one of these signals. If a second signal is caught, the program - // is terminated with exit code 1. - SetupSignalHandler = signals.SetupSignalHandler -) diff --git a/pkg/scheme/scheme.go b/pkg/scheme/scheme.go index cd60319674..55ebe21773 100644 --- a/pkg/scheme/scheme.go +++ b/pkg/scheme/scheme.go @@ -21,37 +21,36 @@ limitations under the License. // Each API group should define a utility function // called AddToScheme for adding its types to a Scheme: // -// // in package myapigroupv1... 
-// var ( -// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"} -// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} -// AddToScheme = SchemeBuilder.AddToScheme -// ) +// // in package myapigroupv1... +// var ( +// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"} +// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} +// AddToScheme = SchemeBuilder.AddToScheme +// ) // -// func init() { -// SchemeBuilder.Register(&MyType{}, &MyTypeList) -// } -// var ( -// scheme *runtime.Scheme = runtime.NewScheme() -// ) +// func init() { +// SchemeBuilder.Register(&MyType{}, &MyTypeList) +// } +// var ( +// scheme *runtime.Scheme = runtime.NewScheme() +// ) // // This also true of the built-in Kubernetes types. Then, in the entrypoint for -// your manager, assemble the scheme containing exactly the types you need. -// For instance, if our controller needs types from the core/v1 API group (e.g. Pod), -// plus types from my.api.group/v1: +// your manager, assemble the scheme containing exactly the types you need, +// panicing if scheme registration failed. For instance, if our controller needs +// types from the core/v1 API group (e.g. Pod), plus types from my.api.group/v1: // -// func init() { -// myapigroupv1.AddToScheme(scheme) -// kubernetesscheme.AddToScheme(scheme) -// } -// -// func main() { -// mgr := controllers.NewManager(controllers.GetConfigOrDie(), manager.Options{ -// Scheme: scheme, -// }) -// // ... -// } +// func init() { +// utilruntime.Must(myapigroupv1.AddToScheme(scheme)) +// utilruntime.Must(kubernetesscheme.AddToScheme(scheme)) +// } // +// func main() { +// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{ +// Scheme: scheme, +// }) +// // ... 
+// } package scheme import ( @@ -66,7 +65,7 @@ type Builder struct { runtime.SchemeBuilder } -// Register adds one or objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld. +// Register adds one or more objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld. func (bld *Builder) Register(object ...runtime.Object) *Builder { bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error { scheme.AddKnownTypes(bld.GroupVersion, object...) diff --git a/pkg/scheme/scheme_suite_test.go b/pkg/scheme/scheme_suite_test.go index 942fcd478a..36ddd9decc 100644 --- a/pkg/scheme/scheme_suite_test.go +++ b/pkg/scheme/scheme_suite_test.go @@ -19,7 +19,7 @@ package scheme_test import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) diff --git a/pkg/scheme/scheme_test.go b/pkg/scheme/scheme_test.go index 6efcc72655..37c6766e6f 100644 --- a/pkg/scheme/scheme_test.go +++ b/pkg/scheme/scheme_test.go @@ -19,8 +19,9 @@ package scheme_test import ( "reflect" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + . 
"github.com/onsi/gomega/gstruct" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -37,28 +38,29 @@ var _ = Describe("Scheme", func() { Build() Expect(err).NotTo(HaveOccurred()) - Expect(s.AllKnownTypes()).To(HaveLen(16)) - Expect(s.AllKnownTypes()[gv.WithKind("Pod")]).To(Equal(reflect.TypeOf(corev1.Pod{}))) - Expect(s.AllKnownTypes()[gv.WithKind("PodList")]).To(Equal(reflect.TypeOf(corev1.PodList{}))) - - // Base types - Expect(s.AllKnownTypes()).To(HaveKey(gv.WithKind("UpdateOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv.WithKind("PatchOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv.WithKind("DeleteOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv.WithKind("ExportOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv.WithKind("GetOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv.WithKind("ListOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv.WithKind("WatchEvent"))) - internalGv := schema.GroupVersion{Group: "core", Version: "__internal"} - Expect(s.AllKnownTypes()).To(HaveKey(internalGv.WithKind("WatchEvent"))) - emptyGv := schema.GroupVersion{Group: "", Version: "v1"} - Expect(s.AllKnownTypes()).To(HaveKey(emptyGv.WithKind("APIGroup"))) - Expect(s.AllKnownTypes()).To(HaveKey(emptyGv.WithKind("APIGroupList"))) - Expect(s.AllKnownTypes()).To(HaveKey(emptyGv.WithKind("APIResourceList"))) - Expect(s.AllKnownTypes()).To(HaveKey(emptyGv.WithKind("APIVersions"))) - Expect(s.AllKnownTypes()).To(HaveKey(emptyGv.WithKind("Status"))) + Expect(s.AllKnownTypes()).To(MatchAllKeys(Keys{ + gv.WithKind("Pod"): Equal(reflect.TypeOf(corev1.Pod{})), + gv.WithKind("PodList"): Equal(reflect.TypeOf(corev1.PodList{})), + + // Base types + gv.WithKind("CreateOptions"): Ignore(), + gv.WithKind("UpdateOptions"): Ignore(), + gv.WithKind("PatchOptions"): Ignore(), + gv.WithKind("DeleteOptions"): Ignore(), + gv.WithKind("GetOptions"): Ignore(), + gv.WithKind("ListOptions"): Ignore(), + 
gv.WithKind("WatchEvent"): Ignore(), + + internalGv.WithKind("WatchEvent"): Ignore(), + + emptyGv.WithKind("APIGroup"): Ignore(), + emptyGv.WithKind("APIGroupList"): Ignore(), + emptyGv.WithKind("APIResourceList"): Ignore(), + emptyGv.WithKind("APIVersions"): Ignore(), + emptyGv.WithKind("Status"): Ignore(), + })) }) It("should be able to add types from other Builders", func() { @@ -73,45 +75,45 @@ var _ = Describe("Scheme", func() { Build() Expect(err).NotTo(HaveOccurred()) - Expect(s.AllKnownTypes()).To(HaveLen(27)) - - // Types from b1 - Expect(s.AllKnownTypes()[gv1.WithKind("Pod")]).To(Equal(reflect.TypeOf(corev1.Pod{}))) - Expect(s.AllKnownTypes()[gv1.WithKind("PodList")]).To(Equal(reflect.TypeOf(corev1.PodList{}))) - - // Types from b2 - Expect(s.AllKnownTypes()[gv2.WithKind("Deployment")]).To(Equal(reflect.TypeOf(appsv1.Deployment{}))) - Expect(s.AllKnownTypes()[gv2.WithKind("Deployment")]).To(Equal(reflect.TypeOf(appsv1.Deployment{}))) - - // Base types - Expect(s.AllKnownTypes()).To(HaveKey(gv1.WithKind("UpdateOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv1.WithKind("PatchOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv1.WithKind("DeleteOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv1.WithKind("ExportOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv1.WithKind("GetOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv1.WithKind("ListOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv1.WithKind("WatchEvent"))) - internalGv1 := schema.GroupVersion{Group: "core", Version: "__internal"} - Expect(s.AllKnownTypes()).To(HaveKey(internalGv1.WithKind("WatchEvent"))) - - Expect(s.AllKnownTypes()).To(HaveKey(gv2.WithKind("UpdateOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv2.WithKind("PatchOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv2.WithKind("DeleteOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv2.WithKind("ExportOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv2.WithKind("GetOptions"))) - 
Expect(s.AllKnownTypes()).To(HaveKey(gv2.WithKind("ListOptions"))) - Expect(s.AllKnownTypes()).To(HaveKey(gv2.WithKind("WatchEvent"))) - internalGv2 := schema.GroupVersion{Group: "apps", Version: "__internal"} - Expect(s.AllKnownTypes()).To(HaveKey(internalGv2.WithKind("WatchEvent"))) - emptyGv := schema.GroupVersion{Group: "", Version: "v1"} - Expect(s.AllKnownTypes()).To(HaveKey(emptyGv.WithKind("APIGroup"))) - Expect(s.AllKnownTypes()).To(HaveKey(emptyGv.WithKind("APIGroupList"))) - Expect(s.AllKnownTypes()).To(HaveKey(emptyGv.WithKind("APIResourceList"))) - Expect(s.AllKnownTypes()).To(HaveKey(emptyGv.WithKind("APIVersions"))) - Expect(s.AllKnownTypes()).To(HaveKey(emptyGv.WithKind("Status"))) + Expect(s.AllKnownTypes()).To(MatchAllKeys(Keys{ + // Types from b1 + gv1.WithKind("Pod"): Equal(reflect.TypeOf(corev1.Pod{})), + gv1.WithKind("PodList"): Equal(reflect.TypeOf(corev1.PodList{})), + + // Types from b2 + gv2.WithKind("Deployment"): Equal(reflect.TypeOf(appsv1.Deployment{})), + gv2.WithKind("DeploymentList"): Equal(reflect.TypeOf(appsv1.DeploymentList{})), + + // Base types + gv1.WithKind("CreateOptions"): Ignore(), + gv1.WithKind("UpdateOptions"): Ignore(), + gv1.WithKind("PatchOptions"): Ignore(), + gv1.WithKind("DeleteOptions"): Ignore(), + gv1.WithKind("GetOptions"): Ignore(), + gv1.WithKind("ListOptions"): Ignore(), + gv1.WithKind("WatchEvent"): Ignore(), + + internalGv1.WithKind("WatchEvent"): Ignore(), + + gv2.WithKind("CreateOptions"): Ignore(), + gv2.WithKind("UpdateOptions"): Ignore(), + gv2.WithKind("PatchOptions"): Ignore(), + gv2.WithKind("DeleteOptions"): Ignore(), + gv2.WithKind("GetOptions"): Ignore(), + gv2.WithKind("ListOptions"): Ignore(), + gv2.WithKind("WatchEvent"): Ignore(), + + internalGv2.WithKind("WatchEvent"): Ignore(), + + emptyGv.WithKind("APIGroup"): Ignore(), + emptyGv.WithKind("APIGroupList"): Ignore(), + emptyGv.WithKind("APIResourceList"): Ignore(), + emptyGv.WithKind("APIVersions"): Ignore(), + emptyGv.WithKind("Status"): 
Ignore(), + })) }) }) }) diff --git a/pkg/source/example_test.go b/pkg/source/example_test.go index d306eaf583..b596ff0a0a 100644 --- a/pkg/source/example_test.go +++ b/pkg/source/example_test.go @@ -21,15 +21,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/source" ) +var mgr manager.Manager var ctrl controller.Controller // This example Watches for Pod Events (e.g. Create / Update / Delete) and enqueues a reconcile.Request // with the Name and Namespace of the Pod. func ExampleKind() { - err := ctrl.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForObject{}) + err := ctrl.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, &handler.TypedEnqueueRequestForObject[*corev1.Pod]{})) if err != nil { // handle it } @@ -41,8 +43,10 @@ func ExampleChannel() { events := make(chan event.GenericEvent) err := ctrl.Watch( - &source.Channel{Source: events}, - &handler.EnqueueRequestForObject{}, + source.Channel( + events, + &handler.EnqueueRequestForObject{}, + ), ) if err != nil { // handle it diff --git a/pkg/source/source.go b/pkg/source/source.go index c777a8c719..c2c2dc4e07 100644 --- a/pkg/source/source.go +++ b/pkg/source/source.go @@ -17,173 +17,193 @@ limitations under the License. 
package source import ( + "context" + "errors" "fmt" "sync" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" + toolscache "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" - "sigs.k8s.io/controller-runtime/pkg/source/internal" + internal "sigs.k8s.io/controller-runtime/pkg/internal/source" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/predicate" ) -var log = logf.RuntimeLog.WithName("source") +var logInformer = logf.RuntimeLog.WithName("source").WithName("Informer") -const ( - // defaultBufferSize is the default number of event notifications that can be buffered. - defaultBufferSize = 1024 -) - -// Source is a source of events (eh.g. Create, Update, Delete operations on Kubernetes Objects, Webhook callbacks, etc) +// Source is a source of events (e.g. Create, Update, Delete operations on Kubernetes Objects, Webhook callbacks, etc) // which should be processed by event.EventHandlers to enqueue reconcile.Requests. // // * Use Kind for events originating in the cluster (e.g. Pod Create, Pod Update, Deployment Update). // -// * Use Channel for events originating outside the cluster (eh.g. GitHub Webhook callback, Polling external urls). +// * Use Channel for events originating outside the cluster (e.g. GitHub Webhook callback, Polling external urls). +// +// Users may build their own Source implementations. +type Source = TypedSource[reconcile.Request] + +// TypedSource is a generic source of events (e.g. Create, Update, Delete operations on Kubernetes Objects, Webhook callbacks, etc) +// which should be processed by event.EventHandlers to enqueue a request. 
+// +// * Use Kind for events originating in the cluster (e.g. Pod Create, Pod Update, Deployment Update). +// +// * Use Channel for events originating outside the cluster (e.g. GitHub Webhook callback, Polling external urls). // -// Users may build their own Source implementations. If their implementations implement any of the inject package -// interfaces, the dependencies will be injected by the Controller when Watch is called. -type Source interface { - // Start is internal and should be called only by the Controller to register an EventHandler with the Informer - // to enqueue reconcile.Requests. - Start(handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error +// Users may build their own Source implementations. +type TypedSource[request comparable] interface { + // Start is internal and should be called only by the Controller to start the source. + // Start must be non-blocking. + Start(context.Context, workqueue.TypedRateLimitingInterface[request]) error } -// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create) -type Kind struct { - // Type is the type of object to watch. e.g. &v1.Pod{} - Type runtime.Object +// SyncingSource is a source that needs syncing prior to being usable. The controller +// will call its WaitForSync prior to starting workers. +type SyncingSource = TypedSyncingSource[reconcile.Request] - // cache used to watch APIs - cache cache.Cache +// TypedSyncingSource is a source that needs syncing prior to being usable. The controller +// will call its WaitForSync prior to starting workers. +type TypedSyncingSource[request comparable] interface { + TypedSource[request] + WaitForSync(ctx context.Context) error } -var _ Source = &Kind{} - -// Start is internal and should be called only by the Controller to register an EventHandler with the Informer -// to enqueue reconcile.Requests. 
-func (ks *Kind) Start(handler handler.EventHandler, queue workqueue.RateLimitingInterface, - prct ...predicate.Predicate) error { +// Kind creates a KindSource with the given cache provider. +func Kind[object client.Object]( + cache cache.Cache, + obj object, + handler handler.TypedEventHandler[object, reconcile.Request], + predicates ...predicate.TypedPredicate[object], +) SyncingSource { + return TypedKind(cache, obj, handler, predicates...) +} - // Type should have been specified by the user. - if ks.Type == nil { - return fmt.Errorf("must specify Kind.Type") +// TypedKind creates a KindSource with the given cache provider. +func TypedKind[object client.Object, request comparable]( + cache cache.Cache, + obj object, + handler handler.TypedEventHandler[object, request], + predicates ...predicate.TypedPredicate[object], +) TypedSyncingSource[request] { + return &internal.Kind[object, request]{ + Type: obj, + Cache: cache, + Handler: handler, + Predicates: predicates, } +} - // cache should have been injected before Start was called - if ks.cache == nil { - return fmt.Errorf("must call CacheInto on Kind before calling Start") - } +var _ Source = &channel[string, reconcile.Request]{} - // Lookup the Informer from the Cache and add an EventHandler which populates the Queue - i, err := ks.cache.GetInformer(ks.Type) - if err != nil { - if kindMatchErr, ok := err.(*meta.NoKindMatchError); ok { - log.Error(err, "if kind is a CRD, it should be installed before calling Start", - "kind", kindMatchErr.GroupKind) - } - return err +// ChannelOpt allows to configure a source.Channel. +type ChannelOpt[object any, request comparable] func(*channel[object, request]) + +// WithPredicates adds the configured predicates to a source.Channel. +func WithPredicates[object any, request comparable](p ...predicate.TypedPredicate[object]) ChannelOpt[object, request] { + return func(c *channel[object, request]) { + c.predicates = append(c.predicates, p...) 
} - i.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) - return nil } -func (ks *Kind) String() string { - if ks.Type != nil && ks.Type.GetObjectKind() != nil { - return fmt.Sprintf("kind source: %v", ks.Type.GetObjectKind().GroupVersionKind().String()) +// WithBufferSize configures the buffer size for a source.Channel. By +// default, the buffer size is 1024. +func WithBufferSize[object any, request comparable](bufferSize int) ChannelOpt[object, request] { + return func(c *channel[object, request]) { + c.bufferSize = &bufferSize } - return fmt.Sprintf("kind source: unknown GVK") } -var _ inject.Cache = &Kind{} +// Channel is used to provide a source of events originating outside the cluster +// (e.g. GitHub Webhook callback). Channel requires the user to wire the external +// source (e.g. http handler) to write GenericEvents to the underlying channel. +func Channel[object any]( + source <-chan event.TypedGenericEvent[object], + handler handler.TypedEventHandler[object, reconcile.Request], + opts ...ChannelOpt[object, reconcile.Request], +) Source { + return TypedChannel[object, reconcile.Request](source, handler, opts...) +} -// InjectCache is internal should be called only by the Controller. InjectCache is used to inject -// the Cache dependency initialized by the ControllerManager. -func (ks *Kind) InjectCache(c cache.Cache) error { - if ks.cache == nil { - ks.cache = c +// TypedChannel is used to provide a source of events originating outside the cluster +// (e.g. GitHub Webhook callback). Channel requires the user to wire the external +// source (e.g. http handler) to write GenericEvents to the underlying channel. 
+func TypedChannel[object any, request comparable]( + source <-chan event.TypedGenericEvent[object], + handler handler.TypedEventHandler[object, request], + opts ...ChannelOpt[object, request], +) TypedSource[request] { + c := &channel[object, request]{ + source: source, + handler: handler, + } + for _, opt := range opts { + opt(c) } - return nil -} -var _ Source = &Channel{} + return c +} -// Channel is used to provide a source of events originating outside the cluster -// (e.g. GitHub Webhook callback). Channel requires the user to wire the external -// source (eh.g. http handler) to write GenericEvents to the underlying channel. -type Channel struct { +type channel[object any, request comparable] struct { // once ensures the event distribution goroutine will be performed only once once sync.Once - // Source is the source channel to fetch GenericEvents - Source <-chan event.GenericEvent + // source is the source channel to fetch GenericEvents + source <-chan event.TypedGenericEvent[object] - // stop is to end ongoing goroutine, and close the channels - stop <-chan struct{} + handler handler.TypedEventHandler[object, request] - // dest is the destination channels of the added event handlers - dest []chan event.GenericEvent + predicates []predicate.TypedPredicate[object] + + bufferSize *int - // DestBufferSize is the specified buffer size of dest channels. - // Default to 1024 if not specified. - DestBufferSize int + // dest is the destination channels of the added event handlers + dest []chan event.TypedGenericEvent[object] // destLock is to ensure the destination channels are safely added/removed destLock sync.Mutex } -func (cs *Channel) String() string { +func (cs *channel[object, request]) String() string { return fmt.Sprintf("channel source: %p", cs) } -var _ inject.Stoppable = &Channel{} - -// InjectStopChannel is internal should be called only by the Controller. -// It is used to inject the stop channel initialized by the ControllerManager. 
-func (cs *Channel) InjectStopChannel(stop <-chan struct{}) error { - if cs.stop == nil { - cs.stop = stop - } - - return nil -} - // Start implements Source and should only be called by the Controller. -func (cs *Channel) Start( - handler handler.EventHandler, - queue workqueue.RateLimitingInterface, - prct ...predicate.Predicate) error { +func (cs *channel[object, request]) Start( + ctx context.Context, + queue workqueue.TypedRateLimitingInterface[request], +) error { // Source should have been specified by the user. - if cs.Source == nil { + if cs.source == nil { return fmt.Errorf("must specify Channel.Source") } - - // stop should have been injected before Start was called - if cs.stop == nil { - return fmt.Errorf("must call InjectStop on Channel before calling Start") + if cs.handler == nil { + return errors.New("must specify Channel.Handler") } - // use default value if DestBufferSize not specified - if cs.DestBufferSize == 0 { - cs.DestBufferSize = defaultBufferSize + if cs.bufferSize == nil { + cs.bufferSize = ptr.To(1024) } + dst := make(chan event.TypedGenericEvent[object], *cs.bufferSize) + + cs.destLock.Lock() + cs.dest = append(cs.dest, dst) + cs.destLock.Unlock() + cs.once.Do(func() { // Distribute GenericEvents to all EventHandler / Queue pairs Watching this source - go cs.syncLoop() + go cs.syncLoop(ctx) }) - dst := make(chan event.GenericEvent, cs.DestBufferSize) go func() { for evt := range dst { shouldHandle := true - for _, p := range prct { + for _, p := range cs.predicates { if !p.Generic(evt) { shouldHandle = false break @@ -191,20 +211,19 @@ func (cs *Channel) Start( } if shouldHandle { - handler.Generic(evt, queue) + func() { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + cs.handler.Generic(ctx, evt, queue) + }() } } }() - cs.destLock.Lock() - defer cs.destLock.Unlock() - - cs.dest = append(cs.dest, dst) - return nil } -func (cs *Channel) doStop() { +func (cs *channel[object, request]) doStop() { cs.destLock.Lock() defer 
cs.destLock.Unlock() @@ -213,7 +232,7 @@ func (cs *Channel) doStop() { } } -func (cs *Channel) distribute(evt event.GenericEvent) { +func (cs *channel[object, request]) distribute(evt event.TypedGenericEvent[object]) { cs.destLock.Lock() defer cs.destLock.Unlock() @@ -227,38 +246,52 @@ func (cs *Channel) distribute(evt event.GenericEvent) { } } -func (cs *Channel) syncLoop() { +func (cs *channel[object, request]) syncLoop(ctx context.Context) { for { select { - case <-cs.stop: + case <-ctx.Done(): // Close destination channels cs.doStop() return - case evt := <-cs.Source: + case evt, stillOpen := <-cs.source: + if !stillOpen { + // if the source channel is closed, we're never gonna get + // anything more on it, so stop & bail + cs.doStop() + return + } cs.distribute(evt) } } } -// Informer is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create) +// Informer is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). type Informer struct { // Informer is the controller-runtime Informer - Informer cache.Informer + Informer cache.Informer + Handler handler.EventHandler + Predicates []predicate.Predicate } var _ Source = &Informer{} // Start is internal and should be called only by the Controller to register an EventHandler with the Informer // to enqueue reconcile.Requests. -func (is *Informer) Start(handler handler.EventHandler, queue workqueue.RateLimitingInterface, - prct ...predicate.Predicate) error { - +func (is *Informer) Start(ctx context.Context, queue workqueue.TypedRateLimitingInterface[reconcile.Request]) error { // Informer should have been specified by the user. 
if is.Informer == nil { return fmt.Errorf("must specify Informer.Informer") } + if is.Handler == nil { + return errors.New("must specify Informer.Handler") + } - is.Informer.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) + _, err := is.Informer.AddEventHandlerWithOptions(internal.NewEventHandler(ctx, queue, is.Handler, is.Predicates), toolscache.HandlerOptions{ + Logger: &logInformer, + }) + if err != nil { + return err + } return nil } @@ -266,15 +299,19 @@ func (is *Informer) String() string { return fmt.Sprintf("informer source: %p", is.Informer) } -// Func is a function that implements Source -type Func func(handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error +var _ Source = Func(nil) + +// Func is a function that implements Source. +type Func = TypedFunc[reconcile.Request] + +// TypedFunc is a function that implements Source. +type TypedFunc[request comparable] func(context.Context, workqueue.TypedRateLimitingInterface[request]) error -// Start implements Source -func (f Func) Start(evt handler.EventHandler, queue workqueue.RateLimitingInterface, - pr ...predicate.Predicate) error { - return f(evt, queue, pr...) +// Start implements Source. +func (f TypedFunc[request]) Start(ctx context.Context, queue workqueue.TypedRateLimitingInterface[request]) error { + return f(ctx, queue) } -func (f Func) String() string { +func (f TypedFunc[request]) String() string { return fmt.Sprintf("func source: %p", f) } diff --git a/pkg/source/source_integration_test.go b/pkg/source/source_integration_test.go index a10bddc9ed..cc0ba530ec 100644 --- a/pkg/source/source_integration_test.go +++ b/pkg/source/source_integration_test.go @@ -17,71 +17,64 @@ limitations under the License. 
package source_test import ( + "context" "fmt" "time" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" kubeinformers "k8s.io/client-go/informers" toolscache "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" ) var _ = Describe("Source", func() { - var instance1, instance2 *source.Kind - var obj runtime.Object - var q workqueue.RateLimitingInterface + var instance1, instance2 source.Source + var obj client.Object + var q workqueue.TypedRateLimitingInterface[reconcile.Request] var c1, c2 chan interface{} var ns string count := 0 - BeforeEach(func(done Done) { + BeforeEach(func(ctx SpecContext) { // Create the namespace for the test ns = fmt.Sprintf("controller-source-kindsource-%v", count) count++ - _, err := clientset.CoreV1().Namespaces().Create(&corev1.Namespace{ + _, err := clientset.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: ns}, - }) + }, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - q = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + q = workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[reconcile.Request](), + workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{ + Name: "test", + }) c1 = make(chan interface{}) c2 = make(chan interface{}) - - close(done) - }) - - JustBeforeEach(func() { - instance1 = &source.Kind{Type: obj} - Expect(inject.CacheInto(icache, instance1)).To(BeTrue()) - - instance2 = 
&source.Kind{Type: obj} - Expect(inject.CacheInto(icache, instance2)).To(BeTrue()) }) - AfterEach(func(done Done) { - err := clientset.CoreV1().Namespaces().Delete(ns, &metav1.DeleteOptions{}) + AfterEach(func(ctx SpecContext) { + err := clientset.CoreV1().Namespaces().Delete(ctx, ns, metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) close(c1) close(c2) - - close(done) }) Describe("Kind", func() { Context("for a Deployment resource", func() { obj = &appsv1.Deployment{} - It("should provide Deployment Events", func(done Done) { + It("should provide Deployment Events", func(ctx SpecContext) { var created, updated, deleted *appsv1.Deployment var err error @@ -110,17 +103,17 @@ var _ = Describe("Source", func() { // Create an event handler to verify the events newHandler := func(c chan interface{}) handler.Funcs { return handler.Funcs{ - CreateFunc: func(evt event.CreateEvent, rli workqueue.RateLimitingInterface) { + CreateFunc: func(ctx context.Context, evt event.CreateEvent, rli workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Expect(rli).To(Equal(q)) c <- evt }, - UpdateFunc: func(evt event.UpdateEvent, rli workqueue.RateLimitingInterface) { + UpdateFunc: func(ctx context.Context, evt event.UpdateEvent, rli workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Expect(rli).To(Equal(q)) c <- evt }, - DeleteFunc: func(evt event.DeleteEvent, rli workqueue.RateLimitingInterface) { + DeleteFunc: func(ctx context.Context, evt event.DeleteEvent, rli workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Expect(rli).To(Equal(q)) c <- evt @@ -131,11 +124,13 @@ var _ = Describe("Source", func() { handler2 := newHandler(c2) // Create 2 instances - Expect(instance1.Start(handler1, q)).To(Succeed()) - Expect(instance2.Start(handler2, q)).To(Succeed()) + instance1 = source.Kind(icache, obj, handler1) + instance2 = source.Kind(icache, obj, handler2) + Expect(instance1.Start(ctx, 
q)).To(Succeed()) + Expect(instance2.Start(ctx, q)).To(Succeed()) By("Creating a Deployment and expecting the CreateEvent.") - created, err = client.Create(deployment) + created, err = client.Create(ctx, deployment, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) Expect(created).NotTo(BeNil()) @@ -143,20 +138,18 @@ var _ = Describe("Source", func() { evt := <-c1 createEvt, ok := evt.(event.CreateEvent) Expect(ok).To(BeTrue(), fmt.Sprintf("expect %T to be %T", evt, event.CreateEvent{})) - Expect(createEvt.Meta).To(Equal(created)) Expect(createEvt.Object).To(Equal(created)) // Check second CreateEvent evt = <-c2 createEvt, ok = evt.(event.CreateEvent) Expect(ok).To(BeTrue(), fmt.Sprintf("expect %T to be %T", evt, event.CreateEvent{})) - Expect(createEvt.Meta).To(Equal(created)) Expect(createEvt.Object).To(Equal(created)) By("Updating a Deployment and expecting the UpdateEvent.") updated = created.DeepCopy() updated.Labels = map[string]string{"biz": "buz"} - updated, err = client.Update(updated) + updated, err = client.Update(ctx, updated, metav1.UpdateOptions{}) Expect(err).NotTo(HaveOccurred()) // Check first UpdateEvent @@ -164,10 +157,8 @@ var _ = Describe("Source", func() { updateEvt, ok := evt.(event.UpdateEvent) Expect(ok).To(BeTrue(), fmt.Sprintf("expect %T to be %T", evt, event.UpdateEvent{})) - Expect(updateEvt.MetaNew).To(Equal(updated)) Expect(updateEvt.ObjectNew).To(Equal(updated)) - Expect(updateEvt.MetaOld).To(Equal(created)) Expect(updateEvt.ObjectOld).To(Equal(created)) // Check second UpdateEvent @@ -175,34 +166,28 @@ var _ = Describe("Source", func() { updateEvt, ok = evt.(event.UpdateEvent) Expect(ok).To(BeTrue(), fmt.Sprintf("expect %T to be %T", evt, event.UpdateEvent{})) - Expect(updateEvt.MetaNew).To(Equal(updated)) Expect(updateEvt.ObjectNew).To(Equal(updated)) - Expect(updateEvt.MetaOld).To(Equal(created)) Expect(updateEvt.ObjectOld).To(Equal(created)) By("Deleting a Deployment and expecting the Delete.") deleted = 
updated.DeepCopy() - err = client.Delete(created.Name, &metav1.DeleteOptions{}) + err = client.Delete(ctx, created.Name, metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) deleted.SetResourceVersion("") evt = <-c1 deleteEvt, ok := evt.(event.DeleteEvent) Expect(ok).To(BeTrue(), fmt.Sprintf("expect %T to be %T", evt, event.DeleteEvent{})) - deleteEvt.Meta.SetResourceVersion("") - Expect(deleteEvt.Meta).To(Equal(deleted)) + deleteEvt.Object.SetResourceVersion("") Expect(deleteEvt.Object).To(Equal(deleted)) evt = <-c2 deleteEvt, ok = evt.(event.DeleteEvent) Expect(ok).To(BeTrue(), fmt.Sprintf("expect %T to be %T", evt, event.DeleteEvent{})) - deleteEvt.Meta.SetResourceVersion("") - Expect(deleteEvt.Meta).To(Equal(deleted)) + deleteEvt.Object.SetResourceVersion("") Expect(deleteEvt.Object).To(Equal(deleted)) - - close(done) - }, 5) + }) }) // TODO(pwittrock): Write this test @@ -220,7 +205,7 @@ var _ = Describe("Source", func() { var informerFactory kubeinformers.SharedInformerFactory var stopTest chan struct{} - BeforeEach(func(done Done) { + BeforeEach(func() { stopTest = make(chan struct{}) informerFactory = kubeinformers.NewSharedInformerFactory(clientset, time.Second*30) depInformer = informerFactory.Apps().V1().ReplicaSets().Informer() @@ -247,125 +232,138 @@ var _ = Describe("Source", func() { }, }, } - close(done) }) - AfterEach(func(done Done) { + AfterEach(func() { close(stopTest) - close(done) }) Context("for a ReplicaSet resource", func() { - It("should provide a ReplicaSet CreateEvent", func(done Done) { + It("should provide a ReplicaSet CreateEvent", func(ctx SpecContext) { c := make(chan struct{}) - q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Informer{Informer: depInformer} - err := instance.Start(handler.Funcs{ - CreateFunc: func(evt event.CreateEvent, q2 workqueue.RateLimitingInterface) { - defer GinkgoRecover() - var err error - rs, err = 
clientset.AppsV1().ReplicaSets("default").Get(rs.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - Expect(q2).To(BeIdenticalTo(q)) - Expect(evt.Meta).To(Equal(rs)) - Expect(evt.Object).To(Equal(rs)) - close(c) - }, - UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected UpdateEvent") - }, - DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected DeleteEvent") - }, - GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected GenericEvent") + q := workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[reconcile.Request](), + workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{ + Name: "test", + }) + instance := &source.Informer{ + Informer: depInformer, + Handler: handler.Funcs{ + CreateFunc: func(ctx context.Context, evt event.CreateEvent, q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + var err error + rs, err := clientset.AppsV1().ReplicaSets("default").Get(ctx, rs.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(q2).To(BeIdenticalTo(q)) + Expect(evt.Object).To(Equal(rs)) + close(c) + }, + UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(context.Context, event.GenericEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected GenericEvent") + }, }, - }, q) + } + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) - rs, err = clientset.AppsV1().ReplicaSets("default").Create(rs) + _, err = 
clientset.AppsV1().ReplicaSets("default").Create(ctx, rs, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) <-c - close(done) - }, 30) + }) - It("should provide a ReplicaSet UpdateEvent", func(done Done) { + It("should provide a ReplicaSet UpdateEvent", func(ctx SpecContext) { var err error - rs, err = clientset.AppsV1().ReplicaSets("default").Get(rs.Name, metav1.GetOptions{}) + rs, err = clientset.AppsV1().ReplicaSets("default").Get(ctx, rs.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) rs2 := rs.DeepCopy() rs2.SetLabels(map[string]string{"biz": "baz"}) - q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Informer{Informer: depInformer} - err = instance.Start(handler.Funcs{ - CreateFunc: func(evt event.CreateEvent, q2 workqueue.RateLimitingInterface) { - }, - UpdateFunc: func(evt event.UpdateEvent, q2 workqueue.RateLimitingInterface) { - defer GinkgoRecover() - var err error - rs2, err = clientset.AppsV1().ReplicaSets("default").Get(rs.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + q := workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[reconcile.Request](), + workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{ + Name: "test", + }) + instance := &source.Informer{ + Informer: depInformer, + Handler: handler.Funcs{ + CreateFunc: func(ctx context.Context, evt event.CreateEvent, q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { + }, + UpdateFunc: func(ctx context.Context, evt event.UpdateEvent, q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + var err error + rs2, err := clientset.AppsV1().ReplicaSets("default").Get(ctx, rs.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) - Expect(q2).To(Equal(q)) - Expect(evt.MetaOld).To(Equal(rs)) - Expect(evt.ObjectOld).To(Equal(rs)) + Expect(q2).To(Equal(q)) + Expect(evt.ObjectOld).To(Equal(rs)) - 
Expect(evt.MetaNew).To(Equal(rs2)) - Expect(evt.ObjectNew).To(Equal(rs2)) + Expect(evt.ObjectNew).To(Equal(rs2)) - close(c) - }, - DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected DeleteEvent") - }, - GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected GenericEvent") + close(c) + }, + DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(context.Context, event.GenericEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected GenericEvent") + }, }, - }, q) + } + err = instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) - rs2, err = clientset.AppsV1().ReplicaSets("default").Update(rs2) + _, err = clientset.AppsV1().ReplicaSets("default").Update(ctx, rs2, metav1.UpdateOptions{}) Expect(err).NotTo(HaveOccurred()) <-c - close(done) }) - It("should provide a ReplicaSet DeletedEvent", func(done Done) { + It("should provide a ReplicaSet DeletedEvent", func(ctx SpecContext) { c := make(chan struct{}) - q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Informer{Informer: depInformer} - err := instance.Start(handler.Funcs{ - CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { - }, - UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { - }, - DeleteFunc: func(evt event.DeleteEvent, q2 workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Expect(q2).To(Equal(q)) - Expect(evt.Meta.GetName()).To(Equal(rs.Name)) - close(c) - }, - GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected GenericEvent") + q := workqueue.NewTypedRateLimitingQueueWithConfig( + 
workqueue.DefaultTypedControllerRateLimiter[reconcile.Request](), + workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{ + Name: "test", + }) + instance := &source.Informer{ + Informer: depInformer, + Handler: handler.Funcs{ + CreateFunc: func(context.Context, event.CreateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + }, + UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + }, + DeleteFunc: func(ctx context.Context, evt event.DeleteEvent, q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Expect(q2).To(Equal(q)) + Expect(evt.Object.GetName()).To(Equal(rs.Name)) + close(c) + }, + GenericFunc: func(context.Context, event.GenericEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected GenericEvent") + }, }, - }, q) + } + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) - err = clientset.AppsV1().ReplicaSets("default").Delete(rs.Name, &metav1.DeleteOptions{}) + err = clientset.AppsV1().ReplicaSets("default").Delete(ctx, rs.Name, metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) <-c - close(done) }) }) }) diff --git a/pkg/source/source_suite_test.go b/pkg/source/source_suite_test.go index cdad0aa536..774d978ca3 100644 --- a/pkg/source/source_suite_test.go +++ b/pkg/source/source_suite_test.go @@ -17,9 +17,10 @@ limitations under the License. package source_test import ( + "context" "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -31,18 +32,20 @@ import ( func TestSource(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Source Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Source Suite") } var testenv *envtest.Environment var config *rest.Config var clientset *kubernetes.Clientset var icache cache.Cache -var stop chan struct{} +var cancel context.CancelFunc -var _ = BeforeSuite(func(done Done) { - stop = make(chan struct{}) - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) +var _ = BeforeSuite(func() { + var ctx context.Context + // Has to be derived from context.Background, as it stays valid past the BeforeSuite + ctx, cancel = context.WithCancel(context.Background()) //nolint:forbidigo + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) testenv = &envtest.Environment{} @@ -58,15 +61,11 @@ var _ = BeforeSuite(func(done Done) { go func() { defer GinkgoRecover() - Expect(icache.Start(stop)).NotTo(HaveOccurred()) + Expect(icache.Start(ctx)).NotTo(HaveOccurred()) }() +}) - close(done) -}, 60) - -var _ = AfterSuite(func(done Done) { - close(stop) +var _ = AfterSuite(func() { + cancel() Expect(testenv.Stop()).To(Succeed()) - - close(done) -}, 5) +}) diff --git a/pkg/source/source_test.go b/pkg/source/source_test.go index 3e67bdfeed..ad311d73b2 100644 --- a/pkg/source/source_test.go +++ b/pkg/source/source_test.go @@ -17,15 +17,19 @@ limitations under the License. package source_test import ( + "context" "fmt" + "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/cache/informertest" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" corev1 "k8s.io/api/core/v1" @@ -39,7 +43,7 @@ var _ = Describe("Source", func() { var p *corev1.Pod var ic *informertest.FakeInformers - BeforeEach(func(done Done) { + BeforeEach(func() { ic = &informertest.FakeInformers{} c = make(chan struct{}) p = &corev1.Pod{ @@ -49,11 +53,10 @@ var _ = Describe("Source", func() { }, }, } - close(done) }) Context("for a Pod resource", func() { - It("should provide a Pod CreateEvent", func(done Done) { + It("should provide a Pod CreateEvent", func(ctx SpecContext) { c := make(chan struct{}) p := &corev1.Pod{ Spec: corev1.PodSpec{ @@ -63,88 +66,87 @@ var _ = Describe("Source", func() { }, } - q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Kind{ - Type: &corev1.Pod{}, - } - Expect(inject.CacheInto(ic, instance)).To(BeTrue()) - err := instance.Start(handler.Funcs{ - CreateFunc: func(evt event.CreateEvent, q2 workqueue.RateLimitingInterface) { + q := workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[reconcile.Request](), + workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{ + Name: "test", + }) + instance := source.Kind(ic, &corev1.Pod{}, handler.TypedFuncs[*corev1.Pod, reconcile.Request]{ + CreateFunc: func(ctx context.Context, evt event.TypedCreateEvent[*corev1.Pod], q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Expect(q2).To(Equal(q)) - Expect(evt.Meta).To(Equal(p)) Expect(evt.Object).To(Equal(p)) close(c) }, - UpdateFunc: func(event.UpdateEvent, 
workqueue.RateLimitingInterface) { + UpdateFunc: func(context.Context, event.TypedUpdateEvent[*corev1.Pod], workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Unexpected UpdateEvent") }, - DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { + DeleteFunc: func(context.Context, event.TypedDeleteEvent[*corev1.Pod], workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Unexpected DeleteEvent") }, - GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + GenericFunc: func(context.Context, event.TypedGenericEvent[*corev1.Pod], workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Unexpected GenericEvent") }, - }, q) + }) + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) + Expect(instance.WaitForSync(ctx)).NotTo(HaveOccurred()) - i, err := ic.FakeInformerFor(&corev1.Pod{}) + i, err := ic.FakeInformerFor(ctx, &corev1.Pod{}) Expect(err).NotTo(HaveOccurred()) i.Add(p) <-c - close(done) }) - It("should provide a Pod UpdateEvent", func(done Done) { + It("should provide a Pod UpdateEvent", func(ctx SpecContext) { p2 := p.DeepCopy() p2.SetLabels(map[string]string{"biz": "baz"}) ic := &informertest.FakeInformers{} - q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Kind{ - Type: &corev1.Pod{}, - } - Expect(instance.InjectCache(ic)).To(Succeed()) - err := instance.Start(handler.Funcs{ - CreateFunc: func(evt event.CreateEvent, q2 workqueue.RateLimitingInterface) { + q := workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[reconcile.Request](), + workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{ + Name: "test", + }) + instance := source.Kind(ic, &corev1.Pod{}, handler.TypedFuncs[*corev1.Pod, reconcile.Request]{ + CreateFunc: func(ctx context.Context, evt event.TypedCreateEvent[*corev1.Pod], q2 
workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Unexpected CreateEvent") }, - UpdateFunc: func(evt event.UpdateEvent, q2 workqueue.RateLimitingInterface) { + UpdateFunc: func(ctx context.Context, evt event.TypedUpdateEvent[*corev1.Pod], q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Expect(q2).To(BeIdenticalTo(q)) - Expect(evt.MetaOld).To(Equal(p)) Expect(evt.ObjectOld).To(Equal(p)) - Expect(evt.MetaNew).To(Equal(p2)) Expect(evt.ObjectNew).To(Equal(p2)) close(c) }, - DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { + DeleteFunc: func(context.Context, event.TypedDeleteEvent[*corev1.Pod], workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Unexpected DeleteEvent") }, - GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + GenericFunc: func(context.Context, event.TypedGenericEvent[*corev1.Pod], workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Unexpected GenericEvent") }, - }, q) + }) + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) + Expect(instance.WaitForSync(ctx)).NotTo(HaveOccurred()) - i, err := ic.FakeInformerFor(&corev1.Pod{}) + i, err := ic.FakeInformerFor(ctx, &corev1.Pod{}) Expect(err).NotTo(HaveOccurred()) i.Update(p, p2) <-c - close(done) }) - It("should provide a Pod DeletedEvent", func(done Done) { + It("should provide a Pod DeletedEvent", func(ctx SpecContext) { c := make(chan struct{}) p := &corev1.Pod{ Spec: corev1.PodSpec{ @@ -154,119 +156,138 @@ var _ = Describe("Source", func() { }, } - q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Kind{ - Type: &corev1.Pod{}, - } - Expect(inject.CacheInto(ic, instance)).To(BeTrue()) - err := instance.Start(handler.Funcs{ - CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { + q := 
workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[reconcile.Request](), + workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{ + Name: "test", + }) + instance := source.Kind(ic, &corev1.Pod{}, handler.TypedFuncs[*corev1.Pod, reconcile.Request]{ + CreateFunc: func(context.Context, event.TypedCreateEvent[*corev1.Pod], workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Unexpected DeleteEvent") }, - UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { + UpdateFunc: func(context.Context, event.TypedUpdateEvent[*corev1.Pod], workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Unexpected UpdateEvent") }, - DeleteFunc: func(evt event.DeleteEvent, q2 workqueue.RateLimitingInterface) { + DeleteFunc: func(ctx context.Context, evt event.TypedDeleteEvent[*corev1.Pod], q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Expect(q2).To(BeIdenticalTo(q)) - Expect(evt.Meta).To(Equal(p)) Expect(evt.Object).To(Equal(p)) close(c) }, - GenericFunc: func(event.GenericEvent, workqueue.RateLimitingInterface) { + GenericFunc: func(context.Context, event.TypedGenericEvent[*corev1.Pod], workqueue.TypedRateLimitingInterface[reconcile.Request]) { defer GinkgoRecover() Fail("Unexpected GenericEvent") }, - }, q) + }) + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) + Expect(instance.WaitForSync(ctx)).NotTo(HaveOccurred()) - i, err := ic.FakeInformerFor(&corev1.Pod{}) + i, err := ic.FakeInformerFor(ctx, &corev1.Pod{}) Expect(err).NotTo(HaveOccurred()) i.Delete(p) <-c - close(done) }) }) - It("should return an error from Start if informers were not injected", func(done Done) { - instance := source.Kind{Type: &corev1.Pod{}} - err := instance.Start(nil, nil) + It("should return an error from Start cache was not provided", func(ctx SpecContext) { + instance := source.Kind(nil, &corev1.Pod{}, nil) + err := 
instance.Start(ctx, nil) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("must call CacheInto on Kind before calling Start")) + Expect(err.Error()).To(ContainSubstring("must create Kind with a non-nil cache")) + }) - close(done) + It("should return an error from Start if a type was not provided", func(ctx SpecContext) { + instance := source.Kind[client.Object](ic, nil, nil) + err := instance.Start(ctx, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("must create Kind with a non-nil object")) + }) + It("should return an error from Start if a handler was not provided", func(ctx SpecContext) { + instance := source.Kind(ic, &corev1.Pod{}, nil) + err := instance.Start(ctx, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("must create Kind with non-nil handler")) }) - It("should return an error from Start if a type was not provided", func(done Done) { - instance := source.Kind{} - Expect(instance.InjectCache(&informertest.FakeInformers{})).To(Succeed()) - err := instance.Start(nil, nil) + It("should return an error if syncing fails", func(ctx SpecContext) { + f := false + instance := source.Kind[client.Object](&informertest.FakeInformers{Synced: &f}, &corev1.Pod{}, &handler.EnqueueRequestForObject{}) + Expect(instance.Start(ctx, nil)).NotTo(HaveOccurred()) + err := instance.WaitForSync(ctx) Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("must specify Kind.Type")) + Expect(err.Error()).To(Equal("cache did not sync")) - close(done) }) Context("for a Kind not in the cache", func() { - It("should return an error when Start is called", func(done Done) { + It("should return an error when WaitForSync is called", func(specContext SpecContext) { ic.Error = fmt.Errorf("test error") - q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + q := workqueue.NewTypedRateLimitingQueueWithConfig( + 
workqueue.DefaultTypedControllerRateLimiter[reconcile.Request](), + workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{ + Name: "test", + }) - instance := &source.Kind{ - Type: &corev1.Pod{}, - } - Expect(instance.InjectCache(ic)).To(Succeed()) - err := instance.Start(handler.Funcs{}, q) - Expect(err).To(HaveOccurred()) + ctx, cancel := context.WithTimeout(specContext, 2*time.Second) + defer cancel() - close(done) + instance := source.Kind(ic, &corev1.Pod{}, handler.TypedFuncs[*corev1.Pod, reconcile.Request]{}) + err := instance.Start(ctx, q) + Expect(err).NotTo(HaveOccurred()) + Eventually(instance.WaitForSync).WithArguments(ctx).Should(HaveOccurred()) }) }) + + It("should return an error if syncing fails", func(ctx SpecContext) { + f := false + instance := source.Kind[client.Object](&informertest.FakeInformers{Synced: &f}, &corev1.Pod{}, &handler.EnqueueRequestForObject{}) + Expect(instance.Start(ctx, nil)).NotTo(HaveOccurred()) + err := instance.WaitForSync(ctx) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(Equal("cache did not sync")) + + }) }) Describe("Func", func() { - It("should be called from Start", func(done Done) { + It("should be called from Start", func(ctx SpecContext) { run := false instance := source.Func(func( - handler.EventHandler, - workqueue.RateLimitingInterface, ...predicate.Predicate) error { + context.Context, + workqueue.TypedRateLimitingInterface[reconcile.Request]) error { run = true return nil }) - Expect(instance.Start(nil, nil)).NotTo(HaveOccurred()) + Expect(instance.Start(ctx, nil)).NotTo(HaveOccurred()) Expect(run).To(BeTrue()) expected := fmt.Errorf("expected error: Func") instance = source.Func(func( - handler.EventHandler, - workqueue.RateLimitingInterface, ...predicate.Predicate) error { + context.Context, + workqueue.TypedRateLimitingInterface[reconcile.Request]) error { return expected }) - Expect(instance.Start(nil, nil)).To(Equal(expected)) - - close(done) + Expect(instance.Start(ctx, 
nil)).To(Equal(expected)) }) }) Describe("Channel", func() { - var stop chan struct{} var ch chan event.GenericEvent BeforeEach(func() { - stop = make(chan struct{}) ch = make(chan event.GenericEvent) }) AfterEach(func() { - close(stop) close(ch) }) Context("for a source", func() { - It("should provide a GenericEvent", func(done Done) { + It("should provide a GenericEvent", func(ctx SpecContext) { ch := make(chan event.GenericEvent) c := make(chan struct{}) p := &corev1.Pod{ @@ -274,7 +295,6 @@ var _ = Describe("Source", func() { } evt := event.GenericEvent{ Object: p, - Meta: p, } // Event that should be filtered out by predicates invalidEvt := event.GenericEvent{} @@ -282,81 +302,91 @@ var _ = Describe("Source", func() { // Predicate to filter out empty event prct := predicate.Funcs{ GenericFunc: func(e event.GenericEvent) bool { - return e.Object != nil && e.Meta != nil + return e.Object != nil }, } - q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Channel{Source: ch} - Expect(inject.StopChannelInto(stop, instance)).To(BeTrue()) - err := instance.Start(handler.Funcs{ - CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected CreateEvent") - }, - UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected UpdateEvent") - }, - DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected DeleteEvent") - }, - GenericFunc: func(evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { - defer GinkgoRecover() - // The empty event should have been filtered out by the predicates, - // and will not be passed to the handler. 
- Expect(q2).To(BeIdenticalTo(q)) - Expect(evt.Meta).To(Equal(p)) - Expect(evt.Object).To(Equal(p)) - close(c) + q := workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[reconcile.Request](), + workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{ + Name: "test", + }) + instance := source.Channel( + ch, + handler.Funcs{ + CreateFunc: func(context.Context, event.CreateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + // The empty event should have been filtered out by the predicates, + // and will not be passed to the handler. 
+ Expect(q2).To(BeIdenticalTo(q)) + Expect(evt.Object).To(Equal(p)) + close(c) + }, }, - }, q, prct) + source.WithPredicates[client.Object, reconcile.Request](prct), + ) + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) ch <- invalidEvt ch <- evt <-c - close(done) }) - It("should get pending events processed once channel unblocked", func(done Done) { + It("should get pending events processed once channel unblocked", func(ctx SpecContext) { ch := make(chan event.GenericEvent) unblock := make(chan struct{}) processed := make(chan struct{}) evt := event.GenericEvent{} eventCount := 0 - q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") + q := workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[reconcile.Request](), + workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{ + Name: "test", + }) // Add a handler to get distribution blocked - instance := &source.Channel{Source: ch} - instance.DestBufferSize = 1 - Expect(inject.StopChannelInto(stop, instance)).To(BeTrue()) - err := instance.Start(handler.Funcs{ - CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected CreateEvent") - }, - UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected UpdateEvent") - }, - DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected DeleteEvent") - }, - GenericFunc: func(evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { - defer GinkgoRecover() - // Block for the first time - if eventCount == 0 { - <-unblock - } - eventCount++ - - if eventCount == 3 { - close(processed) - } + instance := source.Channel( + ch, + handler.Funcs{ + CreateFunc: func(context.Context, event.CreateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + 
UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + // Block for the first time + if eventCount == 0 { + <-unblock + } + eventCount++ + + if eventCount == 3 { + close(processed) + } + }, }, - }, q) + ) + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) // Write 3 events into the source channel. @@ -376,99 +406,105 @@ var _ = Describe("Source", func() { // Validate all of the events have been processed. Expect(eventCount).To(Equal(3)) - - close(done) - }) - It("should get error if no source specified", func(done Done) { - q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Channel{ /*no source specified*/ } - Expect(inject.StopChannelInto(stop, instance)).To(BeTrue()) - err := instance.Start(handler.Funcs{}, q) - Expect(err).To(Equal(fmt.Errorf("must specify Channel.Source"))) - close(done) - }) - It("should get error if no stop channel injected", func(done Done) { - q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Channel{Source: ch} - err := instance.Start(handler.Funcs{}, q) - Expect(err).To(Equal(fmt.Errorf("must call InjectStop on Channel before calling Start"))) - close(done) }) + It("should be able to cope with events in the channel before the source is started", func(ctx SpecContext) { + ch := make(chan event.GenericEvent, 1) + processed := make(chan struct{}) + evt := event.GenericEvent{} + ch <- evt - }) - Context("for multi sources (handlers)", func() { - It("should provide 
GenericEvents for all handlers", func(done Done) { - ch := make(chan event.GenericEvent) - p := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, - } - evt := event.GenericEvent{ - Object: p, - Meta: p, - } - - var resEvent1, resEvent2 event.GenericEvent - c1 := make(chan struct{}) - c2 := make(chan struct{}) + q := workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[reconcile.Request](), + workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{ + Name: "test", + }) + // Add a handler to get distribution blocked + instance := source.Channel( + ch, + handler.Funcs{ + CreateFunc: func(context.Context, event.CreateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + }, + UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() - q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "test") - instance := &source.Channel{Source: ch} - Expect(inject.StopChannelInto(stop, instance)).To(BeTrue()) - err := instance.Start(handler.Funcs{ - CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected CreateEvent") - }, - UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected UpdateEvent") - }, - DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected DeleteEvent") - }, - GenericFunc: func(evt 
event.GenericEvent, q2 workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Expect(q2).To(BeIdenticalTo(q)) - Expect(evt.Meta).To(Equal(p)) - Expect(evt.Object).To(Equal(p)) - resEvent1 = evt - close(c1) + close(processed) + }, }, - }, q) - Expect(err).NotTo(HaveOccurred()) + ) - err = instance.Start(handler.Funcs{ - CreateFunc: func(event.CreateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected CreateEvent") - }, - UpdateFunc: func(event.UpdateEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected UpdateEvent") - }, - DeleteFunc: func(event.DeleteEvent, workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Fail("Unexpected DeleteEvent") - }, - GenericFunc: func(evt event.GenericEvent, q2 workqueue.RateLimitingInterface) { - defer GinkgoRecover() - Expect(q2).To(BeIdenticalTo(q)) - Expect(evt.Meta).To(Equal(p)) - Expect(evt.Object).To(Equal(p)) - resEvent2 = evt - close(c2) - }, - }, q) + err := instance.Start(ctx, q) Expect(err).NotTo(HaveOccurred()) + <-processed + }) + It("should stop when the source channel is closed", func(ctx SpecContext) { + q := workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[reconcile.Request](), + workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{ + Name: "test", + }) + // if we didn't stop, we'd start spamming the queue with empty + // messages as we "received" a zero-valued GenericEvent from + // the source channel + + By("creating a channel with one element, then closing it") + ch := make(chan event.GenericEvent, 1) + evt := event.GenericEvent{} ch <- evt - <-c1 - <-c2 + close(ch) + + By("feeding that channel to a channel source") + processed := make(chan struct{}) + defer close(processed) + src := source.Channel( + ch, + handler.Funcs{ + CreateFunc: func(context.Context, event.CreateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected CreateEvent") + 
}, + UpdateFunc: func(context.Context, event.UpdateEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected UpdateEvent") + }, + DeleteFunc: func(context.Context, event.DeleteEvent, workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() + Fail("Unexpected DeleteEvent") + }, + GenericFunc: func(ctx context.Context, evt event.GenericEvent, q2 workqueue.TypedRateLimitingInterface[reconcile.Request]) { + defer GinkgoRecover() - // Validate the two handlers received same event - Expect(resEvent1).To(Equal(resEvent2)) - close(done) + processed <- struct{}{} + }, + }, + ) + + err := src.Start(ctx, q) + Expect(err).NotTo(HaveOccurred()) + + By("expecting to only get one event") + Eventually(processed).Should(Receive()) + Consistently(processed).ShouldNot(Receive()) + }) + It("should get error if no source specified", func(ctx SpecContext) { + q := workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[reconcile.Request](), + workqueue.TypedRateLimitingQueueConfig[reconcile.Request]{ + Name: "test", + }) + instance := source.Channel[string](nil, nil /*no source specified*/) + err := instance.Start(ctx, q) + Expect(err).To(Equal(fmt.Errorf("must specify Channel.Source"))) }) }) }) diff --git a/pkg/webhook/admission/admission_suite_test.go b/pkg/webhook/admission/admission_suite_test.go index 42323177e6..f4e561b1b8 100644 --- a/pkg/webhook/admission/admission_suite_test.go +++ b/pkg/webhook/admission/admission_suite_test.go @@ -19,21 +19,18 @@ package admission import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) func TestAdmissionWebhook(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "application Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Admission Webhook Suite") } -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - - close(done) -}, 60) +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}) diff --git a/pkg/webhook/admission/decode.go b/pkg/webhook/admission/decode.go index e3257a1078..55f1cafb5e 100644 --- a/pkg/webhook/admission/decode.go +++ b/pkg/webhook/admission/decode.go @@ -17,7 +17,8 @@ limitations under the License. package admission import ( - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "fmt" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/json" @@ -25,23 +26,45 @@ import ( // Decoder knows how to decode the contents of an admission // request into a concrete object. -type Decoder struct { +type Decoder interface { + // Decode decodes the inlined object in the AdmissionRequest into the passed-in runtime.Object. + // If you want decode the OldObject in the AdmissionRequest, use DecodeRaw. + // It errors out if req.Object.Raw is empty i.e. containing 0 raw bytes. + Decode(req Request, into runtime.Object) error + + // DecodeRaw decodes a RawExtension object into the passed-in runtime.Object. + // It errors out if rawObj is empty i.e. containing 0 raw bytes. + DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) error +} + +// decoder knows how to decode the contents of an admission +// request into a concrete object. 
+type decoder struct { codecs serializer.CodecFactory } -// NewDecoder creates a Decoder given the runtime.Scheme -func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { - return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +// NewDecoder creates a decoder given the runtime.Scheme. +func NewDecoder(scheme *runtime.Scheme) Decoder { + if scheme == nil { + panic("scheme should never be nil") + } + return &decoder{codecs: serializer.NewCodecFactory(scheme)} } // Decode decodes the inlined object in the AdmissionRequest into the passed-in runtime.Object. // If you want decode the OldObject in the AdmissionRequest, use DecodeRaw. -func (d *Decoder) Decode(req Request, into runtime.Object) error { +// It errors out if req.Object.Raw is empty i.e. containing 0 raw bytes. +func (d *decoder) Decode(req Request, into runtime.Object) error { + // we error out if rawObj is an empty object. + if len(req.Object.Raw) == 0 { + return fmt.Errorf("there is no content to decode") + } return d.DecodeRaw(req.Object, into) } // DecodeRaw decodes a RawExtension object into the passed-in runtime.Object. -func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) error { +// It errors out if rawObj is empty i.e. containing 0 raw bytes. +func (d *decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) error { // NB(directxman12): there's a bug/weird interaction between decoders and // the API server where the API server doesn't send a GVK on the embedded // objects, which means the unstructured decoder refuses to decode. It @@ -49,12 +72,18 @@ func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) er // and call unstructured's special Unmarshal implementation, which calls // back into that same decoder :-/ // See kubernetes/kubernetes#74373. - if unstructuredInto, isUnstructured := into.(*unstructured.Unstructured); isUnstructured { + + // we error out if rawObj is an empty object. 
+ if len(rawObj.Raw) == 0 { + return fmt.Errorf("there is no content to decode") + } + if unstructuredInto, isUnstructured := into.(runtime.Unstructured); isUnstructured { // unmarshal into unstructured's underlying object to avoid calling the decoder - if err := json.Unmarshal(rawObj.Raw, &unstructuredInto.Object); err != nil { + var object map[string]interface{} + if err := json.Unmarshal(rawObj.Raw, &object); err != nil { return err } - + unstructuredInto.SetUnstructuredContent(object) return nil } diff --git a/pkg/webhook/admission/decode_test.go b/pkg/webhook/admission/decode_test.go index a9ad0875be..130308800f 100644 --- a/pkg/webhook/admission/decode_test.go +++ b/pkg/webhook/admission/decode_test.go @@ -17,10 +17,10 @@ limitations under the License. package admission import ( - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - admissionv1beta1 "k8s.io/api/admission/v1beta1" + admissionv1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -29,17 +29,15 @@ import ( ) var _ = Describe("Admission Webhook Decoder", func() { - var decoder *Decoder + var decoder Decoder BeforeEach(func() { By("creating a new decoder for a scheme") - var err error - decoder, err = NewDecoder(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) + decoder = NewDecoder(scheme.Scheme) Expect(decoder).NotTo(BeNil()) }) req := Request{ - AdmissionRequest: admissionv1beta1.AdmissionRequest{ + AdmissionRequest: admissionv1.AdmissionRequest{ Object: runtime.RawExtension{ Raw: []byte(`{ "apiVersion": "v1", @@ -125,6 +123,8 @@ var _ = Describe("Admission Webhook Decoder", func() { })) }) + // NOTE: This will only pass if a GVK is provided. An unstructered object without a GVK may succeed + // in decoding to an alternate type. 
It("should fail to decode if the object in the request doesn't match the passed-in type", func() { By("trying to extract a pod from the quest into a node") Expect(decoder.Decode(req, &corev1.Node{})).NotTo(Succeed()) @@ -154,4 +154,35 @@ var _ = Describe("Admission Webhook Decoder", func() { "namespace": "default", })) }) + + req2 := Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: "CREATE", + Object: runtime.RawExtension{ + Raw: []byte(`{ + "metadata": { + "name": "foo", + "namespace": "default" + }, + "spec": { + "containers": [ + { + "image": "bar:v2", + "name": "bar" + } + ] + } + }`), + }, + OldObject: runtime.RawExtension{ + Object: nil, + }, + }, + } + + It("should decode a valid admission request without GVK", func() { + By("extracting the object from the request") + var target3 unstructured.Unstructured + Expect(decoder.DecodeRaw(req2.Object, &target3)).To(Succeed()) + }) }) diff --git a/pkg/webhook/admission/defaulter.go b/pkg/webhook/admission/defaulter.go deleted file mode 100644 index 8b255894ba..0000000000 --- a/pkg/webhook/admission/defaulter.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package admission - -import ( - "context" - "encoding/json" - "net/http" - - "k8s.io/apimachinery/pkg/runtime" -) - -// Defaulter defines functions for setting defaults on resources -type Defaulter interface { - runtime.Object - Default() -} - -// DefaultingWebhookFor creates a new Webhook for Defaulting the provided type. -func DefaultingWebhookFor(defaulter Defaulter) *Webhook { - return &Webhook{ - Handler: &mutatingHandler{defaulter: defaulter}, - } -} - -type mutatingHandler struct { - defaulter Defaulter - decoder *Decoder -} - -var _ DecoderInjector = &mutatingHandler{} - -// InjectDecoder injects the decoder into a mutatingHandler. -func (h *mutatingHandler) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - -// Handle handles admission requests. -func (h *mutatingHandler) Handle(ctx context.Context, req Request) Response { - if h.defaulter == nil { - panic("defaulter should never be nil") - } - - // Get the object in the request - obj := h.defaulter.DeepCopyObject().(Defaulter) - err := h.decoder.Decode(req, obj) - if err != nil { - return Errored(http.StatusBadRequest, err) - } - - // Default the object - obj.Default() - marshalled, err := json.Marshal(obj) - if err != nil { - return Errored(http.StatusInternalServerError, err) - } - - // Create the patch - return PatchResponseFromRaw(req.Object.Raw, marshalled) -} diff --git a/pkg/webhook/admission/defaulter_custom.go b/pkg/webhook/admission/defaulter_custom.go new file mode 100644 index 0000000000..a703cbd2c5 --- /dev/null +++ b/pkg/webhook/admission/defaulter_custom.go @@ -0,0 +1,165 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "slices" + + "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" +) + +// CustomDefaulter defines functions for setting defaults on resources. +type CustomDefaulter interface { + Default(ctx context.Context, obj runtime.Object) error +} + +type defaulterOptions struct { + removeUnknownOrOmitableFields bool +} + +// DefaulterOption defines the type of a CustomDefaulter's option +type DefaulterOption func(*defaulterOptions) + +// DefaulterRemoveUnknownOrOmitableFields makes the defaulter prune fields that are in the json object retrieved by the +// webhook but not in the local go type json representation. This happens for example when the CRD in the apiserver has +// fields that our go type doesn't know about, because it's outdated, or the field has a zero value and is `omitempty`. +func DefaulterRemoveUnknownOrOmitableFields(o *defaulterOptions) { + o.removeUnknownOrOmitableFields = true +} + +// WithCustomDefaulter creates a new Webhook for a CustomDefaulter interface. 
+func WithCustomDefaulter(scheme *runtime.Scheme, obj runtime.Object, defaulter CustomDefaulter, opts ...DefaulterOption) *Webhook { + options := &defaulterOptions{} + for _, o := range opts { + o(options) + } + return &Webhook{ + Handler: &defaulterForType{ + object: obj, + defaulter: defaulter, + decoder: NewDecoder(scheme), + removeUnknownOrOmitableFields: options.removeUnknownOrOmitableFields, + }, + } +} + +type defaulterForType struct { + defaulter CustomDefaulter + object runtime.Object + decoder Decoder + removeUnknownOrOmitableFields bool +} + +// Handle handles admission requests. +func (h *defaulterForType) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } + if h.defaulter == nil { + panic("defaulter should never be nil") + } + if h.object == nil { + panic("object should never be nil") + } + + // Always skip when a DELETE operation received in custom mutation handler. + if req.Operation == admissionv1.Delete { + return Response{AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + }} + } + + ctx = NewContextWithRequest(ctx, req) + + // Get the object in the request + obj := h.object.DeepCopyObject() + if err := h.decoder.Decode(req, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + // Keep a copy of the object if needed + var originalObj runtime.Object + if !h.removeUnknownOrOmitableFields { + originalObj = obj.DeepCopyObject() + } + + // Default the object + if err := h.defaulter.Default(ctx, obj); err != nil { + var apiStatus apierrors.APIStatus + if errors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + + // Create the patch + marshalled, err := json.Marshal(obj) + if err != nil { + return Errored(http.StatusInternalServerError, err) + } + + handlerResponse := PatchResponseFromRaw(req.Object.Raw, marshalled) + if 
!h.removeUnknownOrOmitableFields { + handlerResponse = h.dropSchemeRemovals(handlerResponse, originalObj, req.Object.Raw) + } + return handlerResponse +} + +func (h *defaulterForType) dropSchemeRemovals(r Response, original runtime.Object, raw []byte) Response { + const opRemove = "remove" + if !r.Allowed || r.PatchType == nil { + return r + } + + // If we don't have removals in the patch. + if !slices.ContainsFunc(r.Patches, func(o jsonpatch.JsonPatchOperation) bool { return o.Operation == opRemove }) { + return r + } + + // Get the raw to original patch + marshalledOriginal, err := json.Marshal(original) + if err != nil { + return Errored(http.StatusInternalServerError, err) + } + + patchOriginal, err := jsonpatch.CreatePatch(raw, marshalledOriginal) + if err != nil { + return Errored(http.StatusInternalServerError, err) + } + removedByScheme := sets.New(slices.DeleteFunc(patchOriginal, func(p jsonpatch.JsonPatchOperation) bool { return p.Operation != opRemove })...) + + r.Patches = slices.DeleteFunc(r.Patches, func(p jsonpatch.JsonPatchOperation) bool { + return p.Operation == opRemove && removedByScheme.Has(p) + }) + + if len(r.Patches) == 0 { + r.PatchType = nil + } + return r +} diff --git a/pkg/webhook/admission/defaulter_custom_test.go b/pkg/webhook/admission/defaulter_custom_test.go new file mode 100644 index 0000000000..1bc26e59f4 --- /dev/null +++ b/pkg/webhook/admission/defaulter_custom_test.go @@ -0,0 +1,169 @@ +/* +Copyright 2021 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "maps" + "net/http" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "gomodules.xyz/jsonpatch/v2" + + admissionv1 "k8s.io/api/admission/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var _ = Describe("Defaulter Handler", func() { + + It("should remove unknown fields when DefaulterRemoveUnknownFields is passed", func(ctx SpecContext) { + obj := &TestDefaulter{} + handler := WithCustomDefaulter(admissionScheme, obj, &TestCustomDefaulter{}, DefaulterRemoveUnknownOrOmitableFields) + + resp := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + Object: runtime.RawExtension{ + Raw: []byte(`{"newField":"foo", "totalReplicas":5}`), + }, + }, + }) + Expect(resp.Allowed).Should(BeTrue()) + Expect(resp.Patches).To(HaveLen(4)) + Expect(resp.Patches).To(ContainElements( + jsonpatch.JsonPatchOperation{ + Operation: "add", + Path: "/labels", + Value: map[string]any{"foo": "bar"}, + }, + jsonpatch.JsonPatchOperation{ + Operation: "add", + Path: "/replica", + Value: 2.0, + }, + jsonpatch.JsonPatchOperation{ + Operation: "remove", + Path: "/newField", + }, + jsonpatch.JsonPatchOperation{ + Operation: "remove", + Path: "/totalReplicas", + }, + )) + Expect(resp.Result.Code).Should(Equal(int32(http.StatusOK))) + }) + + It("should preserve unknown fields by default", func(ctx SpecContext) { + obj := &TestDefaulter{} + handler := WithCustomDefaulter(admissionScheme, obj, &TestCustomDefaulter{}) + + resp := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + Object: runtime.RawExtension{ + Raw: []byte(`{"newField":"foo", "totalReplicas":5}`), + }, + }, + }) + Expect(resp.Allowed).Should(BeTrue()) + Expect(resp.Patches).To(HaveLen(3)) 
+ Expect(resp.Patches).To(ContainElements( + jsonpatch.JsonPatchOperation{ + Operation: "add", + Path: "/labels", + Value: map[string]any{"foo": "bar"}, + }, + jsonpatch.JsonPatchOperation{ + Operation: "add", + Path: "/replica", + Value: 2.0, + }, + jsonpatch.JsonPatchOperation{ + Operation: "remove", + Path: "/totalReplicas", + }, + )) + Expect(resp.Result.Code).Should(Equal(int32(http.StatusOK))) + }) + + It("should return ok if received delete verb in defaulter handler", func(ctx SpecContext) { + obj := &TestDefaulter{} + handler := WithCustomDefaulter(admissionScheme, obj, &TestCustomDefaulter{}) + resp := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + }, + }, + }) + Expect(resp.Allowed).Should(BeTrue()) + Expect(resp.Result.Code).Should(Equal(int32(http.StatusOK))) + }) +}) + +// TestDefaulter. +var _ runtime.Object = &TestDefaulter{} + +type TestDefaulter struct { + Labels map[string]string `json:"labels,omitempty"` + + Replica int `json:"replica,omitempty"` + TotalReplicas int `json:"totalReplicas,omitempty"` +} + +var testDefaulterGVK = schema.GroupVersionKind{Group: "foo.test.org", Version: "v1", Kind: "TestDefaulter"} + +func (d *TestDefaulter) GetObjectKind() schema.ObjectKind { return d } +func (d *TestDefaulter) DeepCopyObject() runtime.Object { + return &TestDefaulter{ + Labels: maps.Clone(d.Labels), + Replica: d.Replica, + TotalReplicas: d.TotalReplicas, + } +} + +func (d *TestDefaulter) GroupVersionKind() schema.GroupVersionKind { + return testDefaulterGVK +} + +func (d *TestDefaulter) SetGroupVersionKind(gvk schema.GroupVersionKind) {} + +var _ runtime.Object = &TestDefaulterList{} + +type TestDefaulterList struct{} + +func (*TestDefaulterList) GetObjectKind() schema.ObjectKind { return nil } +func (*TestDefaulterList) DeepCopyObject() runtime.Object { return nil } + +// TestCustomDefaulter +type TestCustomDefaulter struct{} + 
+func (d *TestCustomDefaulter) Default(ctx context.Context, obj runtime.Object) error { + o := obj.(*TestDefaulter) + + if o.Labels == nil { + o.Labels = map[string]string{} + } + o.Labels["foo"] = "bar" + + if o.Replica < 2 { + o.Replica = 2 + } + o.TotalReplicas = 0 + return nil +} diff --git a/pkg/webhook/admission/doc.go b/pkg/webhook/admission/doc.go index 0b274dd02b..8dc0cbec6f 100644 --- a/pkg/webhook/admission/doc.go +++ b/pkg/webhook/admission/doc.go @@ -20,9 +20,3 @@ Package admission provides implementation for admission webhook and methods to i See examples/mutatingwebhook.go and examples/validatingwebhook.go for examples of admission webhooks. */ package admission - -import ( - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" -) - -var log = logf.RuntimeLog.WithName("admission") diff --git a/pkg/webhook/admission/http.go b/pkg/webhook/admission/http.go index fa60302adc..f049fb66e6 100644 --- a/pkg/webhook/admission/http.go +++ b/pkg/webhook/admission/http.go @@ -17,17 +17,16 @@ limitations under the License. package admission import ( - "context" "encoding/json" "errors" "fmt" "io" - "io/ioutil" "net/http" + v1 "k8s.io/api/admission/v1" "k8s.io/api/admission/v1beta1" - admissionv1beta1 "k8s.io/api/admission/v1beta1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) @@ -35,67 +34,140 @@ import ( var admissionScheme = runtime.NewScheme() var admissionCodecs = serializer.NewCodecFactory(admissionScheme) +// adapted from https://github.com/kubernetes/kubernetes/blob/c28c2009181fcc44c5f6b47e10e62dacf53e4da0/staging/src/k8s.io/pod-security-admission/cmd/webhook/server/server.go +// +// From https://github.com/kubernetes/apiserver/blob/d6876a0600de06fef75968c4641c64d7da499f25/pkg/server/config.go#L433-L442C5: +// +// 1.5MB is the recommended client request size in byte +// the etcd server should accept. 
See +// https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56. +// A request body might be encoded in json, and is converted to +// proto when persisted in etcd, so we allow 2x as the largest request +// body size to be accepted and decoded in a write request. +// +// For the admission request, we can infer that it contains at most two objects +// (the old and new versions of the object being admitted), each of which can +// be at most 3MB in size. For the rest of the request, we can assume that +// it will be less than 1MB in size. Therefore, we can set the max request +// size to 7MB. +// If your use case requires larger max request sizes, please +// open an issue (https://github.com/kubernetes-sigs/controller-runtime/issues/new). +const maxRequestSize = int64(7 * 1024 * 1024) + func init() { - utilruntime.Must(admissionv1beta1.AddToScheme(admissionScheme)) + utilruntime.Must(v1.AddToScheme(admissionScheme)) + utilruntime.Must(v1beta1.AddToScheme(admissionScheme)) } var _ http.Handler = &Webhook{} func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var body []byte - var err error - - var reviewResponse Response - if r.Body != nil { - if body, err = ioutil.ReadAll(r.Body); err != nil { - wh.log.Error(err, "unable to read the body from the incoming request") - reviewResponse = Errored(http.StatusBadRequest, err) - wh.writeResponse(w, reviewResponse) - return - } - } else { - err = errors.New("request body is empty") - wh.log.Error(err, "bad request") - reviewResponse = Errored(http.StatusBadRequest, err) - wh.writeResponse(w, reviewResponse) + ctx := r.Context() + if wh.WithContextFunc != nil { + ctx = wh.WithContextFunc(ctx, r) + } + + if r.Body == nil || r.Body == http.NoBody { + err := errors.New("request body is empty") + wh.getLogger(nil).Error(err, "bad request") + wh.writeResponse(w, Errored(http.StatusBadRequest, err)) + return + } + + defer r.Body.Close() + limitedReader := 
&io.LimitedReader{R: r.Body, N: maxRequestSize} + body, err := io.ReadAll(limitedReader) + if err != nil { + wh.getLogger(nil).Error(err, "unable to read the body from the incoming request") + wh.writeResponse(w, Errored(http.StatusBadRequest, err)) + return + } + if limitedReader.N <= 0 { + err := fmt.Errorf("request entity is too large; limit is %d bytes", maxRequestSize) + wh.getLogger(nil).Error(err, "unable to read the body from the incoming request; limit reached") + wh.writeResponse(w, Errored(http.StatusRequestEntityTooLarge, err)) return } // verify the content type is accurate - contentType := r.Header.Get("Content-Type") - if contentType != "application/json" { + if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { err = fmt.Errorf("contentType=%s, expected application/json", contentType) - wh.log.Error(err, "unable to process a request with an unknown content type", "content type", contentType) - reviewResponse = Errored(http.StatusBadRequest, err) - wh.writeResponse(w, reviewResponse) + wh.getLogger(nil).Error(err, "unable to process a request with unknown content type") + wh.writeResponse(w, Errored(http.StatusBadRequest, err)) return } + // Both v1 and v1beta1 AdmissionReview types are exactly the same, so the v1beta1 type can + // be decoded into the v1 type. However the runtime codec's decoder guesses which type to + // decode into by type name if an Object's TypeMeta isn't set. By setting TypeMeta of an + // unregistered type to the v1 GVK, the decoder will coerce a v1beta1 AdmissionReview to v1. + // The actual AdmissionReview GVK will be used to write a typed response in case the + // webhook config permits multiple versions, otherwise this response will fail. 
req := Request{} - ar := v1beta1.AdmissionReview{ - // avoid an extra copy - Request: &req.AdmissionRequest, - } - if _, _, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar); err != nil { - wh.log.Error(err, "unable to decode the request") - reviewResponse = Errored(http.StatusBadRequest, err) - wh.writeResponse(w, reviewResponse) + ar := unversionedAdmissionReview{} + // avoid an extra copy + ar.Request = &req.AdmissionRequest + ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview")) + _, actualAdmRevGVK, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar) + if err != nil { + wh.getLogger(nil).Error(err, "unable to decode the request") + wh.writeResponse(w, Errored(http.StatusBadRequest, err)) return } + wh.getLogger(&req).V(5).Info("received request") - // TODO: add panic-recovery for Handle - reviewResponse = wh.Handle(context.Background(), req) - wh.writeResponse(w, reviewResponse) + wh.writeResponseTyped(w, wh.Handle(ctx, req), actualAdmRevGVK) } +// writeResponse writes response to w generically, i.e. without encoding GVK information. func (wh *Webhook) writeResponse(w io.Writer, response Response) { - encoder := json.NewEncoder(w) - responseAdmissionReview := v1beta1.AdmissionReview{ + wh.writeAdmissionResponse(w, v1.AdmissionReview{Response: &response.AdmissionResponse}) +} + +// writeResponseTyped writes response to w with GVK set to admRevGVK, which is necessary +// if multiple AdmissionReview versions are permitted by the webhook. 
+func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, admRevGVK *schema.GroupVersionKind) { + ar := v1.AdmissionReview{ Response: &response.AdmissionResponse, } - err := encoder.Encode(responseAdmissionReview) - if err != nil { - wh.log.Error(err, "unable to encode the response") - wh.writeResponse(w, Errored(http.StatusInternalServerError, err)) + // Default to a v1 AdmissionReview, otherwise the API server may not recognize the request + // if multiple AdmissionReview versions are permitted by the webhook config. + // TODO(estroz): this should be configurable since older API servers won't know about v1. + if admRevGVK == nil || *admRevGVK == (schema.GroupVersionKind{}) { + ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview")) + } else { + ar.SetGroupVersionKind(*admRevGVK) } + wh.writeAdmissionResponse(w, ar) } + +// writeAdmissionResponse writes ar to w. +func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { + if err := json.NewEncoder(w).Encode(ar); err != nil { + wh.getLogger(nil).Error(err, "unable to encode and write the response") + // Since the `ar v1.AdmissionReview` is a clear and legal object, + // it should not have problem to be marshalled into bytes. + // The error here is probably caused by the abnormal HTTP connection, + // e.g., broken pipe, so we can only write the error response once, + // to avoid endless circular calling. 
+ serverError := Errored(http.StatusInternalServerError, err) + if err = json.NewEncoder(w).Encode(v1.AdmissionReview{Response: &serverError.AdmissionResponse}); err != nil { + wh.getLogger(nil).Error(err, "still unable to encode and write the InternalServerError response") + } + } else { + res := ar.Response + if log := wh.getLogger(nil); log.V(5).Enabled() { + if res.Result != nil { + log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason, "message", res.Result.Message) + } + log.V(5).Info("wrote response", "requestID", res.UID, "allowed", res.Allowed) + } + } +} + +// unversionedAdmissionReview is used to decode both v1 and v1beta1 AdmissionReview types. +type unversionedAdmissionReview struct { + v1.AdmissionReview +} + +var _ runtime.Object = &unversionedAdmissionReview{} diff --git a/pkg/webhook/admission/http_test.go b/pkg/webhook/admission/http_test.go index 975115695f..9cea9dd9e7 100644 --- a/pkg/webhook/admission/http_test.go +++ b/pkg/webhook/admission/http_test.go @@ -19,20 +19,26 @@ package admission import ( "bytes" "context" + "crypto/rand" + "fmt" "io" "net/http" "net/http/httptest" + "time" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" - - admissionv1beta1 "k8s.io/api/admission/v1beta1" + admissionv1 "k8s.io/api/admission/v1" ) var _ = Describe("Admission Webhooks", func() { + const ( + gvkJSONv1 = `"kind":"AdmissionReview","apiVersion":"admission.k8s.io/v1"` + gvkJSONv1beta1 = `"kind":"AdmissionReview","apiVersion":"admission.k8s.io/v1beta1"` + ) + Describe("HTTP Handler", func() { var respRecorder *httptest.ResponseRecorder webhook := &Webhook{ @@ -42,17 +48,15 @@ var _ = Describe("Admission Webhooks", func() { respRecorder = &httptest.ResponseRecorder{ Body: bytes.NewBuffer(nil), } - _, err := inject.LoggerInto(log.WithName("test-webhook"), webhook) - Expect(err).NotTo(HaveOccurred()) }) It("should return bad-request when given an empty body", func() { req := &http.Request{Body: nil} - expected := []byte(`{"response":{"uid":"","allowed":false,"status":{"metadata":{},"message":"request body is empty","code":400}}} -`) + expected := `{"response":{"uid":"","allowed":false,"status":{"metadata":{},"message":"request body is empty","code":400}}} +` webhook.ServeHTTP(respRecorder, req) - Expect(respRecorder.Body.Bytes()).To(Equal(expected)) + Expect(respRecorder.Body.String()).To(Equal(expected)) }) It("should return bad-request when given the wrong content-type", func() { @@ -61,10 +65,11 @@ var _ = Describe("Admission Webhooks", func() { Body: nopCloser{Reader: bytes.NewBuffer(nil)}, } - expected := []byte(`{"response":{"uid":"","allowed":false,"status":{"metadata":{},"message":"contentType=application/foo, expected application/json","code":400}}} -`) + expected := + `{"response":{"uid":"","allowed":false,"status":{"metadata":{},"message":"contentType=application/foo, expected application/json","code":400}}} +` webhook.ServeHTTP(respRecorder, req) - Expect(respRecorder.Body.Bytes()).To(Equal(expected)) + Expect(respRecorder.Body.String()).To(Equal(expected)) }) It("should return bad-request when given an 
undecodable body", func() { @@ -73,14 +78,40 @@ var _ = Describe("Admission Webhooks", func() { Body: nopCloser{Reader: bytes.NewBufferString("{")}, } - expected := []byte( + expected := `{"response":{"uid":"","allowed":false,"status":{"metadata":{},"message":"couldn't get version/kind; json parse error: unexpected end of JSON input","code":400}}} -`) +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should error when given a NoBody", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: http.NoBody, + } + + expected := `{"response":{"uid":"","allowed":false,"status":{"metadata":{},"message":"request body is empty","code":400}}} +` webhook.ServeHTTP(respRecorder, req) - Expect(respRecorder.Body.Bytes()).To(Equal(expected)) + Expect(respRecorder.Body.String()).To(Equal(expected)) }) - It("should return the response given by the handler", func() { + It("should error when given an infinite body", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: rand.Reader}, + } + + expected := `{"response":{"uid":"","allowed":false,"status":{"metadata":{},"message":"request entity is too large; limit is 7340032 bytes","code":413}}} +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return the response given by the handler with version defaulted to v1", func() { req := &http.Request{ Header: http.Header{"Content-Type": []string{"application/json"}}, Body: nopCloser{Reader: bytes.NewBufferString(`{"request":{}}`)}, @@ -89,10 +120,110 @@ var _ = Describe("Admission Webhooks", func() { Handler: &fakeHandler{}, } - expected := []byte(`{"response":{"uid":"","allowed":true,"status":{"metadata":{},"code":200}}} -`) + expected := 
fmt.Sprintf(`{%s,"response":{"uid":"","allowed":true,"status":{"metadata":{},"code":200}}} +`, gvkJSONv1) webhook.ServeHTTP(respRecorder, req) - Expect(respRecorder.Body.Bytes()).To(Equal(expected)) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return the v1 response given by the handler", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: nopCloser{Reader: bytes.NewBufferString(fmt.Sprintf(`{%s,"request":{}}`, gvkJSONv1))}, + } + webhook := &Webhook{ + Handler: &fakeHandler{}, + } + + expected := fmt.Sprintf(`{%s,"response":{"uid":"","allowed":true,"status":{"metadata":{},"code":200}}} +`, gvkJSONv1) + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return the v1beta1 response given by the handler", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: nopCloser{Reader: bytes.NewBufferString(fmt.Sprintf(`{%s,"request":{}}`, gvkJSONv1beta1))}, + } + webhook := &Webhook{ + Handler: &fakeHandler{}, + } + + expected := fmt.Sprintf(`{%s,"response":{"uid":"","allowed":true,"status":{"metadata":{},"code":200}}} +`, gvkJSONv1beta1) + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should present the Context from the HTTP request, if any", func(specCtx SpecContext) { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: nopCloser{Reader: bytes.NewBufferString(`{"request":{}}`)}, + } + type ctxkey int + const key ctxkey = 1 + const value = "from-ctx" + webhook := &Webhook{ + Handler: &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + <-ctx.Done() + return Allowed(ctx.Value(key).(string)) + }, + }, + } + + expected := fmt.Sprintf(`{%s,"response":{"uid":"","allowed":true,"status":{"metadata":{},"message":%q,"code":200}}} +`, gvkJSONv1, value) 
+ + ctx, cancel := context.WithCancel(context.WithValue(specCtx, key, value)) + cancel() + webhook.ServeHTTP(respRecorder, req.WithContext(ctx)) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should mutate the Context from the HTTP request, if func supplied", func(specCtx SpecContext) { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: nopCloser{Reader: bytes.NewBufferString(`{"request":{}}`)}, + } + type ctxkey int + const key ctxkey = 1 + webhook := &Webhook{ + Handler: &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + return Allowed(ctx.Value(key).(string)) + }, + }, + WithContextFunc: func(ctx context.Context, r *http.Request) context.Context { + return context.WithValue(ctx, key, r.Header["Content-Type"][0]) + }, + } + + expected := fmt.Sprintf(`{%s,"response":{"uid":"","allowed":true,"status":{"metadata":{},"message":%q,"code":200}}} +`, gvkJSONv1, "application/json") + + ctx, cancel := context.WithCancel(specCtx) + cancel() + webhook.ServeHTTP(respRecorder, req.WithContext(ctx)) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should never run into circular calling if the writer has broken", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: nopCloser{Reader: bytes.NewBufferString(fmt.Sprintf(`{%s,"request":{}}`, gvkJSONv1))}, + } + webhook := &Webhook{ + Handler: &fakeHandler{}, + } + + bw := &brokenWriter{ResponseWriter: respRecorder} + Eventually(func() int { + // This should not be blocked by the circular calling of writeResponse and writeAdmissionResponse + webhook.ServeHTTP(bw, req) + return respRecorder.Body.Len() + }, time.Second*3).Should(Equal(0)) }) }) }) @@ -104,20 +235,8 @@ type nopCloser struct { func (nopCloser) Close() error { return nil } type fakeHandler struct { - invoked bool - fn func(context.Context, Request) Response - decoder *Decoder - injectedString string -} 
- -func (h *fakeHandler) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - -func (h *fakeHandler) InjectString(s string) error { - h.injectedString = s - return nil + invoked bool + fn func(context.Context, Request) Response } func (h *fakeHandler) Handle(ctx context.Context, req Request) Response { @@ -125,7 +244,15 @@ func (h *fakeHandler) Handle(ctx context.Context, req Request) Response { if h.fn != nil { return h.fn(ctx, req) } - return Response{AdmissionResponse: admissionv1beta1.AdmissionResponse{ + return Response{AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, }} } + +type brokenWriter struct { + http.ResponseWriter +} + +func (bw *brokenWriter) Write(buf []byte) (int, error) { + return 0, fmt.Errorf("mock: write: broken pipe") +} diff --git a/pkg/webhook/admission/inject.go b/pkg/webhook/admission/inject.go deleted file mode 100644 index d5af0d598f..0000000000 --- a/pkg/webhook/admission/inject.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -// DecoderInjector is used by the ControllerManager to inject decoder into webhook handlers. -type DecoderInjector interface { - InjectDecoder(*Decoder) error -} - -// InjectDecoderInto will set decoder on i and return the result if it implements Decoder. Returns -// false if i does not implement Decoder. 
-func InjectDecoderInto(decoder *Decoder, i interface{}) (bool, error) { - if s, ok := i.(DecoderInjector); ok { - return true, s.InjectDecoder(decoder) - } - return false, nil -} diff --git a/pkg/webhook/admission/metrics/metrics.go b/pkg/webhook/admission/metrics/metrics.go new file mode 100644 index 0000000000..358a3a9162 --- /dev/null +++ b/pkg/webhook/admission/metrics/metrics.go @@ -0,0 +1,39 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // WebhookPanics is a prometheus counter metrics which holds the total + // number of panics from webhooks. + WebhookPanics = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_webhook_panics_total", + Help: "Total number of webhook panics", + }, []string{}) +) + +func init() { + metrics.Registry.MustRegister( + WebhookPanics, + ) + // Init metric. 
+ WebhookPanics.WithLabelValues().Add(0) +} diff --git a/pkg/webhook/admission/multi.go b/pkg/webhook/admission/multi.go index a65be69f68..ef9c456248 100644 --- a/pkg/webhook/admission/multi.go +++ b/pkg/webhook/admission/multi.go @@ -22,60 +22,47 @@ import ( "fmt" "net/http" - "github.com/appscode/jsonpatch" - admissionv1beta1 "k8s.io/api/admission/v1beta1" + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) type multiMutating []Handler func (hs multiMutating) Handle(ctx context.Context, req Request) Response { patches := []jsonpatch.JsonPatchOperation{} + warnings := []string{} for _, handler := range hs { resp := handler.Handle(ctx, req) if !resp.Allowed { return resp } - if resp.PatchType != nil && *resp.PatchType != admissionv1beta1.PatchTypeJSONPatch { + if resp.PatchType != nil && *resp.PatchType != admissionv1.PatchTypeJSONPatch { return Errored(http.StatusInternalServerError, fmt.Errorf("unexpected patch type returned by the handler: %v, only allow: %v", - resp.PatchType, admissionv1beta1.PatchTypeJSONPatch)) + resp.PatchType, admissionv1.PatchTypeJSONPatch)) } patches = append(patches, resp.Patches...) + warnings = append(warnings, resp.Warnings...) 
} var err error marshaledPatch, err := json.Marshal(patches) if err != nil { - return Errored(http.StatusBadRequest, fmt.Errorf("error when marshaling the patch: %v", err)) + return Errored(http.StatusBadRequest, fmt.Errorf("error when marshaling the patch: %w", err)) } return Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, Result: &metav1.Status{ Code: http.StatusOK, }, Patch: marshaledPatch, - PatchType: func() *admissionv1beta1.PatchType { pt := admissionv1beta1.PatchTypeJSONPatch; return &pt }(), + Warnings: warnings, + PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(), }, } } -// InjectFunc injects the field setter into the handlers. -func (hs multiMutating) InjectFunc(f inject.Func) error { - // inject directly into the handlers. It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - for _, handler := range hs { - if err := f(handler); err != nil { - return err - } - } - - return nil -} - // MultiMutatingHandler combines multiple mutating webhook handlers into a single // mutating webhook handler. Handlers are called in sequential order, and the first // `allowed: false` response may short-circuit the rest. Users must take care to @@ -87,18 +74,21 @@ func MultiMutatingHandler(handlers ...Handler) Handler { type multiValidating []Handler func (hs multiValidating) Handle(ctx context.Context, req Request) Response { + warnings := []string{} for _, handler := range hs { resp := handler.Handle(ctx, req) if !resp.Allowed { return resp } + warnings = append(warnings, resp.Warnings...) 
} return Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, Result: &metav1.Status{ Code: http.StatusOK, }, + Warnings: warnings, }, } } @@ -109,18 +99,3 @@ func (hs multiValidating) Handle(ctx context.Context, req Request) Response { func MultiValidatingHandler(handlers ...Handler) Handler { return multiValidating(handlers) } - -// InjectFunc injects the field setter into the handlers. -func (hs multiValidating) InjectFunc(f inject.Func) error { - // inject directly into the handlers. It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - for _, handler := range hs { - if err := f(handler); err != nil { - return err - } - } - - return nil -} diff --git a/pkg/webhook/admission/multi_test.go b/pkg/webhook/admission/multi_test.go index 8501e62f62..888836ed67 100644 --- a/pkg/webhook/admission/multi_test.go +++ b/pkg/webhook/admission/multi_test.go @@ -19,18 +19,18 @@ package admission import ( "context" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "github.com/appscode/jsonpatch" - admissionv1beta1 "k8s.io/api/admission/v1beta1" + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" ) var _ = Describe("Multi-Handler Admission Webhooks", func() { alwaysAllow := &fakeHandler{ fn: func(ctx context.Context, req Request) Response { return Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, }, } @@ -39,30 +39,53 @@ var _ = Describe("Multi-Handler Admission Webhooks", func() { alwaysDeny := &fakeHandler{ fn: func(ctx context.Context, req Request) Response { return Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: false, }, } }, } + withWarnings := &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Warnings: []string{"handler-warning"}, + }, + } + }, + } + Context("with validating handlers", func() { - It("should deny the request if any handler denies the request", func() { + It("should deny the request if any handler denies the request", func(ctx SpecContext) { By("setting up a handler with accept and deny") handler := MultiValidatingHandler(alwaysAllow, alwaysDeny) By("checking that the handler denies the request") - resp := handler.Handle(context.Background(), Request{}) + resp := handler.Handle(ctx, Request{}) Expect(resp.Allowed).To(BeFalse()) + Expect(resp.Warnings).To(BeEmpty()) }) - It("should allow the request if all handlers allow the request", func() { + It("should allow the request if all handlers allow the request", func(ctx SpecContext) { By("setting up a handler with only accept") handler := MultiValidatingHandler(alwaysAllow, alwaysAllow) By("checking that the handler allows the request") - resp := handler.Handle(context.Background(), Request{}) + resp := 
handler.Handle(ctx, Request{}) + Expect(resp.Allowed).To(BeTrue()) + Expect(resp.Warnings).To(BeEmpty()) + }) + + It("should show the warnings if all handlers allow the request", func(ctx SpecContext) { + By("setting up a handler with only accept") + handler := MultiValidatingHandler(alwaysAllow, withWarnings) + + By("checking that the handler allows the request") + resp := handler.Handle(ctx, Request{}) Expect(resp.Allowed).To(BeTrue()) + Expect(resp.Warnings).To(HaveLen(1)) }) }) @@ -82,9 +105,9 @@ var _ = Describe("Multi-Handler Admission Webhooks", func() { Value: "2", }, }, - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, - PatchType: func() *admissionv1beta1.PatchType { pt := admissionv1beta1.PatchTypeJSONPatch; return &pt }(), + PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(), }, } }, @@ -99,34 +122,68 @@ var _ = Describe("Multi-Handler Admission Webhooks", func() { Value: "world", }, }, - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, - PatchType: func() *admissionv1beta1.PatchType { pt := admissionv1beta1.PatchTypeJSONPatch; return &pt }(), + PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(), }, } }, } - It("should not return any patches if the request is denied", func() { + patcher3 := &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + return Response{ + Patches: []jsonpatch.JsonPatchOperation{ + { + Operation: "add", + Path: "/metadata/annotation/newest-key", + Value: "value", + }, + }, + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Warnings: []string{"annotation-warning"}, + PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(), + }, + } + }, + } + + It("should not return any patches if the request is denied", func(ctx 
SpecContext) { By("setting up a webhook with some patches and a deny") handler := MultiMutatingHandler(patcher1, patcher2, alwaysDeny) By("checking that the handler denies the request and produces no patches") - resp := handler.Handle(context.Background(), Request{}) + resp := handler.Handle(ctx, Request{}) Expect(resp.Allowed).To(BeFalse()) Expect(resp.Patches).To(BeEmpty()) }) - It("should produce all patches if the requests are all allowed", func() { + It("should produce all patches if the requests are all allowed", func(ctx SpecContext) { By("setting up a webhook with some patches") handler := MultiMutatingHandler(patcher1, patcher2, alwaysAllow) By("checking that the handler accepts the request and returns all patches") - resp := handler.Handle(context.Background(), Request{}) + resp := handler.Handle(ctx, Request{}) Expect(resp.Allowed).To(BeTrue()) Expect(resp.Patch).To(Equal([]byte( `[{"op":"add","path":"/metadata/annotation/new-key","value":"new-value"},` + `{"op":"replace","path":"/spec/replicas","value":"2"},{"op":"add","path":"/metadata/annotation/hello","value":"world"}]`))) }) + + It("should produce all patches if the requests are all allowed and show warnings", func(ctx SpecContext) { + By("setting up a webhook with some patches") + handler := MultiMutatingHandler(patcher1, patcher2, alwaysAllow, patcher3) + + By("checking that the handler accepts the request and returns all patches") + resp := handler.Handle(ctx, Request{}) + Expect(resp.Allowed).To(BeTrue()) + Expect(resp.Patch).To(Equal([]byte( + `[{"op":"add","path":"/metadata/annotation/new-key","value":"new-value"},` + + `{"op":"replace","path":"/spec/replicas","value":"2"},{"op":"add","path":"/metadata/annotation/hello","value":"world"},` + + `{"op":"add","path":"/metadata/annotation/newest-key","value":"value"}]`))) + Expect(resp.Warnings).To(HaveLen(1)) + }) + }) }) diff --git a/pkg/webhook/admission/response.go b/pkg/webhook/admission/response.go index bb625ed0d3..ec1c88c989 100644 --- 
a/pkg/webhook/admission/response.go +++ b/pkg/webhook/admission/response.go @@ -19,29 +19,28 @@ package admission import ( "net/http" - "github.com/appscode/jsonpatch" - - admissionv1beta1 "k8s.io/api/admission/v1beta1" + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Allowed constructs a response indicating that the given operation // is allowed (without any patches). -func Allowed(reason string) Response { - return ValidationResponse(true, reason) +func Allowed(message string) Response { + return ValidationResponse(true, message) } // Denied constructs a response indicating that the given operation // is not allowed. -func Denied(reason string) Response { - return ValidationResponse(false, reason) +func Denied(message string) Response { + return ValidationResponse(false, message) } // Patched constructs a response indicating that the given operation is // allowed, and that the target object should be modified by the given // JSONPatch operations. -func Patched(reason string, patches ...jsonpatch.JsonPatchOperation) Response { - resp := Allowed(reason) +func Patched(message string, patches ...jsonpatch.JsonPatchOperation) Response { + resp := Allowed(message) resp.Patches = patches return resp @@ -50,7 +49,7 @@ func Patched(reason string, patches ...jsonpatch.JsonPatchOperation) Response { // Errored creates a new Response for error-handling a request. func Errored(code int32, err error) Response { return Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Code: code, @@ -61,21 +60,24 @@ func Errored(code int32, err error) Response { } // ValidationResponse returns a response for admitting a request. 
-func ValidationResponse(allowed bool, reason string) Response { +func ValidationResponse(allowed bool, message string) Response { code := http.StatusForbidden + reason := metav1.StatusReasonForbidden if allowed { code = http.StatusOK + reason = "" } resp := Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: allowed, Result: &metav1.Status{ - Code: int32(code), + Code: int32(code), + Reason: reason, }, }, } - if len(reason) > 0 { - resp.Result.Reason = metav1.StatusReason(reason) + if len(message) > 0 { + resp.Result.Message = message } return resp } @@ -90,9 +92,33 @@ func PatchResponseFromRaw(original, current []byte) Response { } return Response{ Patches: patches, - AdmissionResponse: admissionv1beta1.AdmissionResponse{ - Allowed: true, - PatchType: func() *admissionv1beta1.PatchType { pt := admissionv1beta1.PatchTypeJSONPatch; return &pt }(), + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + PatchType: func() *admissionv1.PatchType { + if len(patches) == 0 { + return nil + } + pt := admissionv1.PatchTypeJSONPatch + return &pt + }(), + }, + } +} + +// validationResponseFromStatus returns a response for admitting a request with provided Status object. +func validationResponseFromStatus(allowed bool, status metav1.Status) Response { + resp := Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: allowed, + Result: &status, }, } + return resp +} + +// WithWarnings adds the given warnings to the Response. +// If any warnings were already given, they will not be overwritten. +func (r Response) WithWarnings(warnings ...string) Response { + r.AdmissionResponse.Warnings = append(r.AdmissionResponse.Warnings, warnings...) 
+ return r } diff --git a/pkg/webhook/admission/response_test.go b/pkg/webhook/admission/response_test.go index eb7ec832b6..03107c92f5 100644 --- a/pkg/webhook/admission/response_test.go +++ b/pkg/webhook/admission/response_test.go @@ -20,11 +20,11 @@ import ( "errors" "net/http" - "github.com/appscode/jsonpatch" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - admissionv1beta1 "k8s.io/api/admission/v1beta1" + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,7 +33,7 @@ var _ = Describe("Admission Webhook Response Helpers", func() { It("should return an 'allowed' response", func() { Expect(Allowed("")).To(Equal( Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, Result: &metav1.Status{ Code: http.StatusOK, @@ -46,11 +46,11 @@ var _ = Describe("Admission Webhook Response Helpers", func() { It("should populate a status with a reason when a reason is given", func() { Expect(Allowed("acceptable")).To(Equal( Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, Result: &metav1.Status{ - Code: http.StatusOK, - Reason: "acceptable", + Code: http.StatusOK, + Message: "acceptable", }, }, }, @@ -62,10 +62,11 @@ var _ = Describe("Admission Webhook Response Helpers", func() { It("should return a 'not allowed' response", func() { Expect(Denied("")).To(Equal( Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ - Code: http.StatusForbidden, + Code: http.StatusForbidden, + Reason: metav1.StatusReasonForbidden, }, }, }, @@ -75,11 +76,12 @@ var _ = Describe("Admission Webhook Response Helpers", func() { It("should populate a status with a 
reason when a reason is given", func() { Expect(Denied("UNACCEPTABLE!")).To(Equal( Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ - Code: http.StatusForbidden, - Reason: "UNACCEPTABLE!", + Code: http.StatusForbidden, + Reason: metav1.StatusReasonForbidden, + Message: "UNACCEPTABLE!", }, }, }, @@ -102,7 +104,7 @@ var _ = Describe("Admission Webhook Response Helpers", func() { It("should return an 'allowed' response with the given patches", func() { Expect(Patched("", ops...)).To(Equal( Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, Result: &metav1.Status{ Code: http.StatusOK, @@ -115,11 +117,11 @@ var _ = Describe("Admission Webhook Response Helpers", func() { It("should populate a status with a reason when a reason is given", func() { Expect(Patched("some changes", ops...)).To(Equal( Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, Result: &metav1.Status{ - Code: http.StatusOK, - Reason: "some changes", + Code: http.StatusOK, + Message: "some changes", }, }, Patches: ops, @@ -132,7 +134,7 @@ var _ = Describe("Admission Webhook Response Helpers", func() { It("should return a denied response with an error", func() { err := errors.New("this is an error") expected := Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Code: http.StatusBadRequest, @@ -146,15 +148,15 @@ var _ = Describe("Admission Webhook Response Helpers", func() { }) Describe("ValidationResponse", func() { - It("should populate a status with a reason when a reason is given", func() { + It("should populate a status with a message when a message is given", func() { By("checking that a message is populated for 'allowed' 
responses") Expect(ValidationResponse(true, "acceptable")).To(Equal( Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, Result: &metav1.Status{ - Code: http.StatusOK, - Reason: "acceptable", + Code: http.StatusOK, + Message: "acceptable", }, }, }, @@ -163,11 +165,12 @@ var _ = Describe("Admission Webhook Response Helpers", func() { By("checking that a message is populated for 'denied' responses") Expect(ValidationResponse(false, "UNACCEPTABLE!")).To(Equal( Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ - Code: http.StatusForbidden, - Reason: "UNACCEPTABLE!", + Code: http.StatusForbidden, + Reason: metav1.StatusReasonForbidden, + Message: "UNACCEPTABLE!", }, }, }, @@ -178,7 +181,7 @@ var _ = Describe("Admission Webhook Response Helpers", func() { By("checking that it returns an 'allowed' response when allowed is true") Expect(ValidationResponse(true, "")).To(Equal( Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, Result: &metav1.Status{ Code: http.StatusOK, @@ -190,10 +193,11 @@ var _ = Describe("Admission Webhook Response Helpers", func() { By("checking that it returns an 'denied' response when allowed is false") Expect(ValidationResponse(false, "")).To(Equal( Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ - Code: http.StatusForbidden, + Code: http.StatusForbidden, + Reason: metav1.StatusReasonForbidden, }, }, }, @@ -207,13 +211,39 @@ var _ = Describe("Admission Webhook Response Helpers", func() { Patches: []jsonpatch.JsonPatchOperation{ {Operation: "replace", Path: "/a", Value: "bar"}, }, - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: 
admissionv1.AdmissionResponse{ Allowed: true, - PatchType: func() *admissionv1beta1.PatchType { pt := admissionv1beta1.PatchTypeJSONPatch; return &pt }(), + PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(), }, } resp := PatchResponseFromRaw([]byte(`{"a": "foo"}`), []byte(`{"a": "bar"}`)) Expect(resp).To(Equal(expected)) }) }) + + Describe("WithWarnings", func() { + It("should add the warnings to the existing response without removing any existing warnings", func() { + initialResponse := Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + Warnings: []string{"existing-warning"}, + }, + } + warnings := []string{"additional-warning-1", "additional-warning-2"} + expectedResponse := Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + Warnings: []string{"existing-warning", "additional-warning-1", "additional-warning-2"}, + }, + } + + Expect(initialResponse.WithWarnings(warnings...)).To(Equal(expectedResponse)) + }) + }) }) diff --git a/pkg/webhook/admission/validator.go b/pkg/webhook/admission/validator.go deleted file mode 100644 index 282bb9b47c..0000000000 --- a/pkg/webhook/admission/validator.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package admission - -import ( - "context" - "net/http" - - "k8s.io/api/admission/v1beta1" - "k8s.io/apimachinery/pkg/runtime" -) - -// Validator defines functions for validating an operation -type Validator interface { - runtime.Object - ValidateCreate() error - ValidateUpdate(old runtime.Object) error -} - -// ValidatingWebhookFor creates a new Webhook for validating the provided type. -func ValidatingWebhookFor(validator Validator) *Webhook { - return &Webhook{ - Handler: &validatingHandler{validator: validator}, - } -} - -type validatingHandler struct { - validator Validator - decoder *Decoder -} - -var _ DecoderInjector = &validatingHandler{} - -// InjectDecoder injects the decoder into a validatingHandler. -func (h *validatingHandler) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - -// Handle handles admission requests. -func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { - if h.validator == nil { - panic("validator should never be nil") - } - - // Get the object in the request - obj := h.validator.DeepCopyObject().(Validator) - if req.Operation == v1beta1.Create { - err := h.decoder.Decode(req, obj) - if err != nil { - return Errored(http.StatusBadRequest, err) - } - - err = obj.ValidateCreate() - if err != nil { - return Denied(err.Error()) - } - } - - if req.Operation == v1beta1.Update { - oldObj := obj.DeepCopyObject() - - err := h.decoder.DecodeRaw(req.Object, obj) - if err != nil { - return Errored(http.StatusBadRequest, err) - } - err = h.decoder.DecodeRaw(req.OldObject, oldObj) - if err != nil { - return Errored(http.StatusBadRequest, err) - } - - err = obj.ValidateUpdate(oldObj) - if err != nil { - return Denied(err.Error()) - } - } - - return Allowed("") -} diff --git a/pkg/webhook/admission/validator_custom.go b/pkg/webhook/admission/validator_custom.go new file mode 100644 index 0000000000..ef1be52a8f --- /dev/null +++ b/pkg/webhook/admission/validator_custom.go @@ -0,0 +1,128 @@ +/* +Copyright 
2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "errors" + "fmt" + "net/http" + + v1 "k8s.io/api/admission/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" +) + +// Warnings represents warning messages. +type Warnings []string + +// CustomValidator defines functions for validating an operation. +// The object to be validated is passed into methods as a parameter. +type CustomValidator interface { + // ValidateCreate validates the object on creation. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateCreate(ctx context.Context, obj runtime.Object) (warnings Warnings, err error) + + // ValidateUpdate validates the object on update. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings Warnings, err error) + + // ValidateDelete validates the object on deletion. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateDelete(ctx context.Context, obj runtime.Object) (warnings Warnings, err error) +} + +// WithCustomValidator creates a new Webhook for validating the provided type. 
+func WithCustomValidator(scheme *runtime.Scheme, obj runtime.Object, validator CustomValidator) *Webhook { + return &Webhook{ + Handler: &validatorForType{object: obj, validator: validator, decoder: NewDecoder(scheme)}, + } +} + +type validatorForType struct { + validator CustomValidator + object runtime.Object + decoder Decoder +} + +// Handle handles admission requests. +func (h *validatorForType) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } + if h.validator == nil { + panic("validator should never be nil") + } + if h.object == nil { + panic("object should never be nil") + } + + ctx = NewContextWithRequest(ctx, req) + + // Get the object in the request + obj := h.object.DeepCopyObject() + + var err error + var warnings []string + + switch req.Operation { + case v1.Connect: + // No validation for connect requests. + // TODO(vincepri): Should we validate CONNECT requests? In what cases? + case v1.Create: + if err := h.decoder.Decode(req, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + warnings, err = h.validator.ValidateCreate(ctx, obj) + case v1.Update: + oldObj := obj.DeepCopyObject() + if err := h.decoder.DecodeRaw(req.Object, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + if err := h.decoder.DecodeRaw(req.OldObject, oldObj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + warnings, err = h.validator.ValidateUpdate(ctx, oldObj, obj) + case v1.Delete: + // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 + // OldObject contains the object being deleted + if err := h.decoder.DecodeRaw(req.OldObject, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + warnings, err = h.validator.ValidateDelete(ctx, obj) + default: + return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation %q", req.Operation)) + } + + // Check the error message first. 
+ if err != nil { + var apiStatus apierrors.APIStatus + if errors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()).WithWarnings(warnings...) + } + return Denied(err.Error()).WithWarnings(warnings...) + } + + // Return allowed if everything succeeded. + return Allowed("").WithWarnings(warnings...) +} diff --git a/pkg/webhook/admission/validator_custom_test.go b/pkg/webhook/admission/validator_custom_test.go new file mode 100644 index 0000000000..7c9615df71 --- /dev/null +++ b/pkg/webhook/admission/validator_custom_test.go @@ -0,0 +1,508 @@ +/* +Copyright 2021 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "errors" + "net/http" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + admissionv1 "k8s.io/api/admission/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var fakeValidatorVK = schema.GroupVersionKind{Group: "foo.test.org", Version: "v1", Kind: "fakeValidator"} + +var _ = Describe("customValidatingHandler", func() { + + Context("when dealing with successful results without warning", func() { + val := &fakeCustomValidator{ErrorToReturn: nil, GVKToReturn: fakeValidatorVK, WarningsToReturn: nil} + f := &fakeValidator{} + handler := WithCustomValidator(admissionScheme, f, val) + + It("should return 200 in response when create succeeds", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + + Expect(response.Allowed).Should(BeTrue()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusOK))) + }) + + It("should return 200 in response when update succeeds", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + Expect(response.Allowed).Should(BeTrue()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusOK))) + }) + + It("should return 200 in response when delete succeeds", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + Expect(response.Allowed).Should(BeTrue()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusOK))) + }) + }) 
+ + const warningMessage = "warning message" + const anotherWarningMessage = "another warning message" + Context("when dealing with successful results with warning", func() { + f := &fakeValidator{} + val := &fakeCustomValidator{ErrorToReturn: nil, GVKToReturn: fakeValidatorVK, WarningsToReturn: []string{ + warningMessage, + anotherWarningMessage, + }} + handler := WithCustomValidator(admissionScheme, f, val) + It("should return 200 in response when create succeeds, with warning messages", func(ctx SpecContext) { + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + + Expect(response.Allowed).Should(BeTrue()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusOK))) + Expect(response.AdmissionResponse.Warnings).Should(ContainElement(warningMessage)) + Expect(response.AdmissionResponse.Warnings).Should(ContainElement(anotherWarningMessage)) + }) + + It("should return 200 in response when update succeeds, with warning messages", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + Expect(response.Allowed).Should(BeTrue()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusOK))) + Expect(response.AdmissionResponse.Warnings).Should(ContainElement(warningMessage)) + Expect(response.AdmissionResponse.Warnings).Should(ContainElement(anotherWarningMessage)) + }) + + It("should return 200 in response when delete succeeds, with warning messages", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + 
+ Object: f, + }, + }, + }) + Expect(response.Allowed).Should(BeTrue()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusOK))) + Expect(response.AdmissionResponse.Warnings).Should(ContainElement(warningMessage)) + Expect(response.AdmissionResponse.Warnings).Should(ContainElement(anotherWarningMessage)) + }) + }) + + Context("when dealing with Status errors, with warning messages", func() { + // Status error would overwrite the warning messages, so no warning messages should be observed. + expectedError := &apierrors.StatusError{ + ErrStatus: metav1.Status{ + Message: "some message", + Code: http.StatusUnprocessableEntity, + }, + } + f := &fakeValidator{} + val := &fakeCustomValidator{ErrorToReturn: expectedError, GVKToReturn: fakeValidatorVK, WarningsToReturn: []string{warningMessage, anotherWarningMessage}} + handler := WithCustomValidator(admissionScheme, f, val) + + It("should propagate the Status from ValidateCreate's return value to the HTTP response", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(expectedError.Status().Code)) + Expect(*response.Result).Should(Equal(expectedError.Status())) + Expect(response.AdmissionResponse.Warnings).Should(ContainElements(warningMessage)) + Expect(response.AdmissionResponse.Warnings).Should(ContainElements(anotherWarningMessage)) + + }) + + It("should propagate the Status from ValidateUpdate's return value to the HTTP response", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + + Object: f, + }, + }, + }) + + 
Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(expectedError.Status().Code)) + Expect(*response.Result).Should(Equal(expectedError.Status())) + Expect(response.AdmissionResponse.Warnings).Should(ContainElements(warningMessage)) + Expect(response.AdmissionResponse.Warnings).Should(ContainElements(anotherWarningMessage)) + + }) + + It("should propagate the Status from ValidateDelete's return value to the HTTP response", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(expectedError.Status().Code)) + Expect(*response.Result).Should(Equal(expectedError.Status())) + Expect(response.AdmissionResponse.Warnings).Should(ContainElements(warningMessage)) + Expect(response.AdmissionResponse.Warnings).Should(ContainElements(anotherWarningMessage)) + + }) + + }) + + Context("when dealing with Status errors, without warning messages", func() { + + expectedError := &apierrors.StatusError{ + ErrStatus: metav1.Status{ + Message: "some message", + Code: http.StatusUnprocessableEntity, + }, + } + f := &fakeValidator{} + val := &fakeCustomValidator{ErrorToReturn: expectedError, GVKToReturn: fakeValidatorVK, WarningsToReturn: nil} + handler := WithCustomValidator(admissionScheme, f, val) + + It("should propagate the Status from ValidateCreate's return value to the HTTP response", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(expectedError.Status().Code)) + 
Expect(*response.Result).Should(Equal(expectedError.Status())) + + }) + + It("should propagate the Status from ValidateUpdate's return value to the HTTP response", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(expectedError.Status().Code)) + Expect(*response.Result).Should(Equal(expectedError.Status())) + + }) + + It("should propagate the Status from ValidateDelete's return value to the HTTP response", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(expectedError.Status().Code)) + + Expect(*response.Result).Should(Equal(expectedError.Status())) + + }) + + }) + + Context("when dealing with non-status errors, without warning messages", func() { + + expectedError := errors.New("some error") + f := &fakeValidator{} + val := &fakeCustomValidator{ErrorToReturn: expectedError, GVKToReturn: fakeValidatorVK} + handler := WithCustomValidator(admissionScheme, f, val) + + It("should return 403 response when ValidateCreate with error message embedded", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusForbidden))) + 
Expect(response.Result.Reason).Should(Equal(metav1.StatusReasonForbidden)) + Expect(response.Result.Message).Should(Equal(expectedError.Error())) + + }) + + It("should return 403 response when ValidateUpdate returns non-APIStatus error", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusForbidden))) + Expect(response.Result.Reason).Should(Equal(metav1.StatusReasonForbidden)) + Expect(response.Result.Message).Should(Equal(expectedError.Error())) + + }) + + It("should return 403 response when ValidateDelete returns non-APIStatus error", func(ctx SpecContext) { + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusForbidden))) + Expect(response.Result.Reason).Should(Equal(metav1.StatusReasonForbidden)) + Expect(response.Result.Message).Should(Equal(expectedError.Error())) + }) + }) + + Context("when dealing with non-status errors, with warning messages", func() { + + expectedError := errors.New("some error") + f := &fakeValidator{} + val := &fakeCustomValidator{ErrorToReturn: expectedError, GVKToReturn: fakeValidatorVK, WarningsToReturn: []string{warningMessage, anotherWarningMessage}} + handler := WithCustomValidator(admissionScheme, f, val) + + It("should return 403 response when ValidateCreate with error message embedded", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + + 
Operation: admissionv1.Create, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + Expect(response.Allowed).Should(BeFalse()) + + Expect(response.Result.Code).Should(Equal(int32(http.StatusForbidden))) + Expect(response.Result.Reason).Should(Equal(metav1.StatusReasonForbidden)) + Expect(response.Result.Message).Should(Equal(expectedError.Error())) + Expect(response.AdmissionResponse.Warnings).Should(ContainElement(warningMessage)) + Expect(response.AdmissionResponse.Warnings).Should(ContainElement(anotherWarningMessage)) + }) + + It("should return 403 response when ValidateUpdate returns non-APIStatus error", func(ctx SpecContext) { + + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + Object: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + OldObject: runtime.RawExtension{ + + Raw: []byte("{}"), + Object: f, + }, + }, + }) + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusForbidden))) + Expect(response.Result.Reason).Should(Equal(metav1.StatusReasonForbidden)) + Expect(response.Result.Message).Should(Equal(expectedError.Error())) + + Expect(response.AdmissionResponse.Warnings).Should(ContainElement(warningMessage)) + Expect(response.AdmissionResponse.Warnings).Should(ContainElement(anotherWarningMessage)) + + }) + + It("should return 403 response when ValidateDelete returns non-APIStatus error", func(ctx SpecContext) { + response := handler.Handle(ctx, Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + OldObject: runtime.RawExtension{ + Raw: []byte("{}"), + Object: f, + }, + }, + }) + Expect(response.Allowed).Should(BeFalse()) + Expect(response.Result.Code).Should(Equal(int32(http.StatusForbidden))) + Expect(response.Result.Reason).Should(Equal(metav1.StatusReasonForbidden)) + Expect(response.Result.Message).Should(Equal(expectedError.Error())) + 
Expect(response.AdmissionResponse.Warnings).Should(ContainElement(warningMessage)) + Expect(response.AdmissionResponse.Warnings).Should(ContainElement(anotherWarningMessage)) + + }) + }) + + PIt("should return 400 in response when create fails on decode", func() {}) + + PIt("should return 400 in response when update fails on decoding new object", func() {}) + + PIt("should return 400 in response when update fails on decoding old object", func() {}) + + PIt("should return 400 in response when delete fails on decode", func() {}) + +}) + +// fakeCustomValidator provides fake validating webhook functionality for testing. +// It implements the admission.CustomValidator interface and +// rejects all requests with the same configured error +// or passes if ErrorToReturn is nil. +// It always returns the configured warning messages WarningsToReturn. +type fakeCustomValidator struct { + // ErrorToReturn is the error with which fakeCustomValidator rejects all requests + ErrorToReturn error `json:"errorToReturn,omitempty"` + // GVKToReturn is the GroupVersionKind that the webhook operates on + GVKToReturn schema.GroupVersionKind + // WarningsToReturn is the warnings that fakeCustomValidator returns for all requests + WarningsToReturn []string +} + +func (v *fakeCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings Warnings, err error) { + return v.WarningsToReturn, v.ErrorToReturn +} + +func (v *fakeCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings Warnings, err error) { + return v.WarningsToReturn, v.ErrorToReturn +} + +func (v *fakeCustomValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings Warnings, err error) { + return v.WarningsToReturn, v.ErrorToReturn +} + +type fakeValidator struct { + // GVKToReturn is the GroupVersionKind that the webhook operates on + GVKToReturn schema.GroupVersionKind +} + +func (v *fakeValidator) SetGroupVersionKind(gvk schema.GroupVersionKind) { + 
v.GVKToReturn = gvk +} + +func (v *fakeValidator) GroupVersionKind() schema.GroupVersionKind { + return v.GVKToReturn +} + +func (v *fakeValidator) GetObjectKind() schema.ObjectKind { + return v +} + +func (v *fakeValidator) DeepCopyObject() runtime.Object { + return &fakeValidator{ + GVKToReturn: v.GVKToReturn, + } +} diff --git a/pkg/webhook/admission/webhook.go b/pkg/webhook/admission/webhook.go index 2f6cd9a88f..cba6da2cb0 100644 --- a/pkg/webhook/admission/webhook.go +++ b/pkg/webhook/admission/webhook.go @@ -19,16 +19,21 @@ package admission import ( "context" "errors" + "fmt" "net/http" + "sync" - "github.com/appscode/jsonpatch" "github.com/go-logr/logr" - admissionv1beta1 "k8s.io/api/admission/v1beta1" + "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/json" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog/v2" + admissionmetrics "sigs.k8s.io/controller-runtime/pkg/webhook/admission/metrics" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" ) var ( @@ -41,7 +46,7 @@ var ( // name, namespace), as well as the operation in question // (e.g. Get, Create, etc), and the object itself. type Request struct { - admissionv1beta1.AdmissionRequest + admissionv1.AdmissionRequest } // Response is the output of an admission handler. @@ -57,7 +62,7 @@ type Response struct { Patches []jsonpatch.JsonPatchOperation // AdmissionResponse is the raw admission response. // The Patch field in it will be overwritten by the listed patches. 
- admissionv1beta1.AdmissionResponse + admissionv1.AdmissionResponse } // Complete populates any fields that are yet to be set in @@ -84,7 +89,7 @@ func (r *Response) Complete(req Request) error { if err != nil { return err } - patchType := admissionv1beta1.PatchTypeJSONPatch + patchType := admissionv1.PatchTypeJSONPatch r.PatchType = &patchType return nil @@ -92,6 +97,10 @@ func (r *Response) Complete(req Request) error { // Handler can handle an AdmissionRequest. type Handler interface { + // Handle yields a response to an AdmissionRequest. + // + // The supplied context is extracted from the received http.Request, allowing wrapping + // http.Handlers to inject values into and control cancelation of downstream request processing. Handle(context.Context, Request) Response } @@ -106,91 +115,152 @@ func (f HandlerFunc) Handle(ctx context.Context, req Request) Response { } // Webhook represents each individual webhook. +// +// It must be registered with a webhook.Server or +// populated by StandaloneWebhook to be run on an arbitrary HTTP server. type Webhook struct { // Handler actually processes an admission request returning whether it was allowed or denied, // and potentially patches to apply to the handler. Handler Handler - // decoder is constructed on receiving a scheme and passed down to then handler - decoder *Decoder + // RecoverPanic indicates whether the panic caused by webhook should be recovered. + // Defaults to true. + RecoverPanic *bool - log logr.Logger + // WithContextFunc will allow you to take the http.Request.Context() and + // add any additional information such as passing the request path or + // headers thus allowing you to read them from within the handler + WithContextFunc func(context.Context, *http.Request) context.Context + + // LogConstructor is used to construct a logger for logging messages during webhook calls + // based on the given base logger (which might carry more values like the webhook's path). 
+ // Note: LogConstructor has to be able to handle nil requests as we are also using it + // outside the context of requests. + LogConstructor func(base logr.Logger, req *Request) logr.Logger + + setupLogOnce sync.Once + log logr.Logger } -// InjectLogger gets a handle to a logging instance, hopefully with more info about this particular webhook. -func (w *Webhook) InjectLogger(l logr.Logger) error { - w.log = l - return nil +// WithRecoverPanic takes a bool flag which indicates whether the panic caused by webhook should be recovered. +// Defaults to true. +func (wh *Webhook) WithRecoverPanic(recoverPanic bool) *Webhook { + wh.RecoverPanic = &recoverPanic + return wh } // Handle processes AdmissionRequest. // If the webhook is mutating type, it delegates the AdmissionRequest to each handler and merge the patches. // If the webhook is validating type, it delegates the AdmissionRequest to each handler and // deny the request if anyone denies. -func (w *Webhook) Handle(ctx context.Context, req Request) Response { - resp := w.Handler.Handle(ctx, req) +func (wh *Webhook) Handle(ctx context.Context, req Request) (response Response) { + defer func() { + if r := recover(); r != nil { + admissionmetrics.WebhookPanics.WithLabelValues().Inc() + + if wh.RecoverPanic == nil || *wh.RecoverPanic { + for _, fn := range utilruntime.PanicHandlers { + fn(ctx, r) + } + response = Errored(http.StatusInternalServerError, fmt.Errorf("panic: %v [recovered]", r)) + // Note: We explicitly have to set the response UID. Usually that is done via resp.Complete below, + // but if we encounter a panic in wh.Handler.Handle we are never going to reach resp.Complete. 
+ response.UID = req.UID + return + } + + log := logf.FromContext(ctx) + log.Info(fmt.Sprintf("Observed a panic in webhook: %v", r)) + panic(r) + } + }() + + reqLog := wh.getLogger(&req) + ctx = logf.IntoContext(ctx, reqLog) + + resp := wh.Handler.Handle(ctx, req) if err := resp.Complete(req); err != nil { - w.log.Error(err, "unable to encode response") - return Errored(http.StatusInternalServerError, errUnableToEncodeResponse) + reqLog.Error(err, "unable to encode response") + resp := Errored(http.StatusInternalServerError, errUnableToEncodeResponse) + // Note: We explicitly have to set the response UID. Usually that is done via resp.Complete. + resp.UID = req.UID + return resp } return resp } -// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. -func (w *Webhook) InjectScheme(s *runtime.Scheme) error { - // TODO(directxman12): we should have a better way to pass this down +// getLogger constructs a logger from the injected log and LogConstructor. +func (wh *Webhook) getLogger(req *Request) logr.Logger { + wh.setupLogOnce.Do(func() { + if wh.log.GetSink() == nil { + wh.log = logf.Log.WithName("admission") + } + }) - var err error - w.decoder, err = NewDecoder(s) - if err != nil { - return err + logConstructor := wh.LogConstructor + if logConstructor == nil { + logConstructor = DefaultLogConstructor } + return logConstructor(wh.log, req) +} - // inject the decoder here too, just in case the order of calling this is not - // scheme first, then inject func - if w.Handler != nil { - if _, err := InjectDecoderInto(w.GetDecoder(), w.Handler); err != nil { - return err - } +// DefaultLogConstructor adds some commonly interesting fields to the given logger. 
+func DefaultLogConstructor(base logr.Logger, req *Request) logr.Logger { + if req != nil { + return base.WithValues("object", klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + "resource", req.Resource, "user", req.UserInfo.Username, + "requestID", req.UID, + ) } - - return nil + return base } -// GetDecoder returns a decoder to decode the objects embedded in admission requests. -// It may be nil if we haven't received a scheme to use to determine object types yet. -func (w *Webhook) GetDecoder() *Decoder { - return w.decoder +// StandaloneOptions lets you configure a StandaloneWebhook. +type StandaloneOptions struct { + // Logger to be used by the webhook. + // If none is set, it defaults to log.Log global logger. + Logger logr.Logger + // MetricsPath is used for labelling prometheus metrics + // by the path it is served on. + // If none is set, prometheus metrics will not be generated. + MetricsPath string } -// InjectFunc injects the field setter into the webhook. -func (w *Webhook) InjectFunc(f inject.Func) error { - // inject directly into the handlers. It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - - // also inject a decoder, and wrap this so that we get a setFields - // that injects a decoder (hopefully things don't ignore the duplicate - // InjectorInto call). - - var setFields inject.Func - setFields = func(target interface{}) error { - if err := f(target); err != nil { - return err - } - - if _, err := inject.InjectorInto(setFields, target); err != nil { - return err - } +// StandaloneWebhook prepares a webhook for use without a webhook.Server, +// passing in the information normally populated by webhook.Server +// and instrumenting the webhook with metrics. +// +// Use this to attach your webhook to an arbitrary HTTP server or mux. 
+// +// Note that you are responsible for terminating TLS if you use StandaloneWebhook +// in your own server/mux. In order to be accessed by a kubernetes cluster, +// all webhook servers require TLS. +func StandaloneWebhook(hook *Webhook, opts StandaloneOptions) (http.Handler, error) { + if opts.Logger.GetSink() != nil { + hook.log = opts.Logger + } + if opts.MetricsPath == "" { + return hook, nil + } + return metrics.InstrumentedHook(opts.MetricsPath, hook), nil +} - if _, err := InjectDecoderInto(w.GetDecoder(), target); err != nil { - return err - } +// requestContextKey is how we find the admission.Request in a context.Context. +type requestContextKey struct{} - return nil +// RequestFromContext returns an admission.Request from ctx. +func RequestFromContext(ctx context.Context) (Request, error) { + if v, ok := ctx.Value(requestContextKey{}).(Request); ok { + return v, nil } - return setFields(w.Handler) + return Request{}, errors.New("admission.Request not found in context") +} + +// NewContextWithRequest returns a new Context, derived from ctx, which carries the +// provided admission.Request. +func NewContextWithRequest(ctx context.Context, req Request) context.Context { + return context.WithValue(ctx, requestContextKey{}, req) } diff --git a/pkg/webhook/admission/webhook_test.go b/pkg/webhook/admission/webhook_test.go index 2097414de0..5176077368 100644 --- a/pkg/webhook/admission/webhook_test.go +++ b/pkg/webhook/admission/webhook_test.go @@ -18,26 +18,40 @@ package admission import ( "context" + "io" "net/http" - . "github.com/onsi/ginkgo" + "github.com/go-logr/logr" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - - "github.com/appscode/jsonpatch" - admissionv1beta1 "k8s.io/api/admission/v1beta1" + "github.com/onsi/gomega/gbytes" + "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + authenticationv1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" machinerytypes "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) var _ = Describe("Admission Webhooks", func() { + var ( + logBuffer *gbytes.Buffer + testLogger logr.Logger + ) + + BeforeEach(func() { + logBuffer = gbytes.NewBuffer() + testLogger = zap.New(zap.JSONEncoder(), zap.WriteTo(io.MultiWriter(logBuffer, GinkgoWriter))) + }) + allowHandler := func() *Webhook { handler := &fakeHandler{ fn: func(ctx context.Context, req Request) Response { return Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, }, } @@ -50,45 +64,45 @@ var _ = Describe("Admission Webhooks", func() { return webhook } - It("should invoke the handler to get a response", func() { + It("should invoke the handler to get a response", func(ctx SpecContext) { By("setting up a webhook with an allow handler") webhook := allowHandler() By("invoking the webhook") - resp := webhook.Handle(context.Background(), Request{}) + resp := webhook.Handle(ctx, Request{}) By("checking that it allowed the request") Expect(resp.Allowed).To(BeTrue()) }) - It("should ensure that the response's UID is set to the request's UID", func() { + It("should ensure that the response's UID is set to the request's UID", func(ctx SpecContext) { By("setting up a webhook") webhook := allowHandler() By("invoking the webhook") - resp := webhook.Handle(context.Background(), Request{AdmissionRequest: 
admissionv1beta1.AdmissionRequest{UID: "foobar"}}) + resp := webhook.Handle(ctx, Request{AdmissionRequest: admissionv1.AdmissionRequest{UID: "foobar"}}) By("checking that the response share's the request's UID") Expect(resp.UID).To(Equal(machinerytypes.UID("foobar"))) }) - It("should populate the status on a response if one is not provided", func() { + It("should populate the status on a response if one is not provided", func(ctx SpecContext) { By("setting up a webhook") webhook := allowHandler() By("invoking the webhook") - resp := webhook.Handle(context.Background(), Request{}) + resp := webhook.Handle(ctx, Request{}) By("checking that the response share's the request's UID") Expect(resp.Result).To(Equal(&metav1.Status{Code: http.StatusOK})) }) - It("shouldn't overwrite the status on a response", func() { + It("shouldn't overwrite the status on a response", func(ctx SpecContext) { By("setting up a webhook that sets a status") webhook := &Webhook{ Handler: HandlerFunc(func(ctx context.Context, req Request) Response { return Response{ - AdmissionResponse: admissionv1beta1.AdmissionResponse{ + AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: true, Result: &metav1.Status{Message: "Ground Control to Major Tom"}, }, @@ -97,14 +111,14 @@ var _ = Describe("Admission Webhooks", func() { } By("invoking the webhook") - resp := webhook.Handle(context.Background(), Request{}) + resp := webhook.Handle(ctx, Request{}) By("checking that the message is intact") Expect(resp.Result).NotTo(BeNil()) Expect(resp.Result.Message).To(Equal("Ground Control to Major Tom")) }) - It("should serialize patch operations into a single jsonpatch blob", func() { + It("should serialize patch operations into a single jsonpatch blob", func(ctx SpecContext) { By("setting up a webhook with a patching handler") webhook := &Webhook{ Handler: HandlerFunc(func(ctx context.Context, req Request) Response { @@ -112,100 +126,171 @@ var _ = Describe("Admission Webhooks", func() { }), } - By("invoking 
the webhoook") - resp := webhook.Handle(context.Background(), Request{}) + By("invoking the webhook") + resp := webhook.Handle(ctx, Request{}) By("checking that a JSON patch is populated on the response") - patchType := admissionv1beta1.PatchTypeJSONPatch + patchType := admissionv1.PatchTypeJSONPatch Expect(resp.PatchType).To(Equal(&patchType)) Expect(resp.Patch).To(Equal([]byte(`[{"op":"add","path":"/a","value":2},{"op":"replace","path":"/b","value":4}]`))) }) - Describe("dependency injection", func() { - It("should set dependencies passed in on the handler", func() { - By("setting up a webhook and injecting it with a injection func that injects a string") - setFields := func(target interface{}) error { - inj, ok := target.(stringInjector) - if !ok { - return nil + It("should pass a request logger via the context", func(ctx SpecContext) { + By("setting up a webhook that uses the request logger") + webhook := &Webhook{ + Handler: HandlerFunc(func(ctx context.Context, req Request) Response { + logf.FromContext(ctx).Info("Received request") + + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + }, } + }), + log: testLogger, + } - return inj.InjectString("something") - } - handler := &fakeHandler{} - webhook := &Webhook{ - Handler: handler, + By("invoking the webhook") + resp := webhook.Handle(ctx, Request{AdmissionRequest: admissionv1.AdmissionRequest{ + UID: "test123", + Name: "foo", + Namespace: "bar", + Resource: metav1.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "deployments", + }, + UserInfo: authenticationv1.UserInfo{ + Username: "tim", + }, + }}) + Expect(resp.Allowed).To(BeTrue()) + + By("checking that the log message contains the request fields") + Eventually(logBuffer).Should(gbytes.Say(`"msg":"Received request","object":{"name":"foo","namespace":"bar"},"namespace":"bar","name":"foo","resource":{"group":"apps","version":"v1","resource":"deployments"},"user":"tim","requestID":"test123"}`)) + }) + + 
It("should pass a request logger created by LogConstructor via the context", func(ctx SpecContext) { + By("setting up a webhook that uses the request logger") + webhook := &Webhook{ + Handler: HandlerFunc(func(ctx context.Context, req Request) Response { + logf.FromContext(ctx).Info("Received request") + + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + }, + } + }), + LogConstructor: func(base logr.Logger, req *Request) logr.Logger { + return base.WithValues("operation", req.Operation, "requestID", req.UID) + }, + log: testLogger, + } + + By("invoking the webhook") + resp := webhook.Handle(ctx, Request{AdmissionRequest: admissionv1.AdmissionRequest{ + UID: "test123", + Operation: admissionv1.Create, + }}) + Expect(resp.Allowed).To(BeTrue()) + + By("checking that the log message contains the request fields") + Eventually(logBuffer).Should(gbytes.Say(`"msg":"Received request","operation":"CREATE","requestID":"test123"}`)) + }) + + Describe("panic recovery", func() { + It("should recover panic if RecoverPanic is true by default", func(ctx SpecContext) { + panicHandler := func() *Webhook { + handler := &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + panic("fake panic test") + }, + } + webhook := &Webhook{ + Handler: handler, + // RecoverPanic defaults to true. 
+ } + + return webhook } - Expect(setFields(webhook)).To(Succeed()) - Expect(inject.InjectorInto(setFields, webhook)).To(BeTrue()) - By("checking that the string was injected") - Expect(handler.injectedString).To(Equal("something")) + By("setting up a webhook with a panicking handler") + webhook := panicHandler() + + By("invoking the webhook") + resp := webhook.Handle(ctx, Request{}) + + By("checking that it errored the request") + Expect(resp.Allowed).To(BeFalse()) + Expect(resp.Result.Code).To(Equal(int32(http.StatusInternalServerError))) + Expect(resp.Result.Message).To(Equal("panic: fake panic test [recovered]")) }) - It("should inject a decoder into the handler", func() { - By("setting up a webhook and injecting it with a injection func that injects a scheme") - setFields := func(target interface{}) error { - if _, err := inject.SchemeInto(runtime.NewScheme(), target); err != nil { - return err + It("should recover panic if RecoverPanic is true", func(ctx SpecContext) { + panicHandler := func() *Webhook { + handler := &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + panic("fake panic test") + }, } - return nil - } - handler := &fakeHandler{} - webhook := &Webhook{ - Handler: handler, + webhook := &Webhook{ + Handler: handler, + RecoverPanic: ptr.To[bool](true), + } + + return webhook } - Expect(setFields(webhook)).To(Succeed()) - Expect(inject.InjectorInto(setFields, webhook)).To(BeTrue()) - By("checking that the decoder was injected") - Expect(handler.decoder).NotTo(BeNil()) + By("setting up a webhook with a panicking handler") + webhook := panicHandler() + + By("invoking the webhook") + resp := webhook.Handle(ctx, Request{}) + + By("checking that it errored the request") + Expect(resp.Allowed).To(BeFalse()) + Expect(resp.Result.Code).To(Equal(int32(http.StatusInternalServerError))) + Expect(resp.Result.Message).To(Equal("panic: fake panic test [recovered]")) }) - It("should pass a setFields that also injects a decoder into 
sub-dependencies", func() { - By("setting up a webhook and injecting it with a injection func that injects a scheme") - setFields := func(target interface{}) error { - if _, err := inject.SchemeInto(runtime.NewScheme(), target); err != nil { - return err + It("should not recover panic if RecoverPanic is false", func(ctx SpecContext) { + panicHandler := func() *Webhook { + handler := &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + panic("fake panic test") + }, } - return nil - } - handler := &handlerWithSubDependencies{ - Handler: HandlerFunc(func(ctx context.Context, req Request) Response { - return Response{} - }), - dep: &subDep{}, - } - webhook := &Webhook{ - Handler: handler, + webhook := &Webhook{ + Handler: handler, + RecoverPanic: ptr.To[bool](false), + } + + return webhook } - Expect(setFields(webhook)).To(Succeed()) - Expect(inject.InjectorInto(setFields, webhook)).To(BeTrue()) - By("checking that setFields sets the decoder as well") - Expect(handler.dep.decoder).NotTo(BeNil()) + By("setting up a webhook with a panicking handler") + defer func() { + Expect(recover()).ShouldNot(BeNil()) + }() + webhook := panicHandler() + + By("invoking the webhook") + webhook.Handle(ctx, Request{}) }) }) }) -type stringInjector interface { - InjectString(s string) error -} - -type handlerWithSubDependencies struct { - Handler - dep *subDep -} - -func (h *handlerWithSubDependencies) InjectFunc(f inject.Func) error { - return f(h.dep) -} +var _ = It("Should be able to write/read admission.Request to/from context", func(specContext SpecContext) { + testRequest := Request{ + admissionv1.AdmissionRequest{ + UID: "test-uid", + }, + } -type subDep struct { - decoder *Decoder -} + ctx := NewContextWithRequest(specContext, testRequest) -func (d *subDep) InjectDecoder(dec *Decoder) error { - d.decoder = dec - return nil -} + gotRequest, err := RequestFromContext(ctx) + Expect(err).To(Not(HaveOccurred())) + Expect(gotRequest).To(Equal(testRequest)) +}) diff 
--git a/pkg/webhook/alias.go b/pkg/webhook/alias.go index 27ed60ad18..2882e7bab3 100644 --- a/pkg/webhook/alias.go +++ b/pkg/webhook/alias.go @@ -17,17 +17,17 @@ limitations under the License. package webhook import ( - "github.com/appscode/jsonpatch" + "gomodules.xyz/jsonpatch/v2" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) // define some aliases for common bits of the webhook functionality -// Defaulter defines functions for setting defaults on resources -type Defaulter = admission.Defaulter +// CustomDefaulter defines functions for setting defaults on resources. +type CustomDefaulter = admission.CustomDefaulter -// Validator defines functions for validating an operation -type Validator = admission.Validator +// CustomValidator defines functions for validating an operation. +type CustomValidator = admission.CustomValidator // AdmissionRequest defines the input for an admission handler. // It contains information to identify the object in diff --git a/pkg/webhook/authentication/authentication_suite_test.go b/pkg/webhook/authentication/authentication_suite_test.go new file mode 100644 index 0000000000..29f7b3e17e --- /dev/null +++ b/pkg/webhook/authentication/authentication_suite_test.go @@ -0,0 +1,36 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestAuthenticationWebhook(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Authentication Webhook Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}) diff --git a/pkg/runtime/doc.go b/pkg/webhook/authentication/doc.go similarity index 66% rename from pkg/runtime/doc.go rename to pkg/webhook/authentication/doc.go index 34101b3fa4..d2b85f378c 100644 --- a/pkg/runtime/doc.go +++ b/pkg/webhook/authentication/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,8 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package runtime contains not-quite-internal mechanisms for -// controller-runtime, plus some deprecated exports of functionality -// moved elsewhere. Most users should not need to import anything in -// pkg/runtime. -package runtime +/* +Package authentication provides implementation for authentication webhook and +methods to implement authentication webhook handlers. + +See examples/tokenreview/ for an example of authentication webhooks. +*/ +package authentication diff --git a/pkg/webhook/authentication/http.go b/pkg/webhook/authentication/http.go new file mode 100644 index 0000000000..abf95e5421 --- /dev/null +++ b/pkg/webhook/authentication/http.go @@ -0,0 +1,152 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + authenticationv1 "k8s.io/api/authentication/v1" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var authenticationScheme = runtime.NewScheme() +var authenticationCodecs = serializer.NewCodecFactory(authenticationScheme) + +// The TokenReview resource mostly contains a bearer token which +// at most should have a few KB's of size, so we picked 1 MB to +// have plenty of buffer. +// If your use case requires larger max request sizes, please +// open an issue (https://github.com/kubernetes-sigs/controller-runtime/issues/new). 
+const maxRequestSize = int64(1 * 1024 * 1024) + +func init() { + utilruntime.Must(authenticationv1.AddToScheme(authenticationScheme)) + utilruntime.Must(authenticationv1beta1.AddToScheme(authenticationScheme)) +} + +var _ http.Handler = &Webhook{} + +func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + if wh.WithContextFunc != nil { + ctx = wh.WithContextFunc(ctx, r) + } + + if r.Body == nil || r.Body == http.NoBody { + err := errors.New("request body is empty") + wh.getLogger(nil).Error(err, "bad request") + wh.writeResponse(w, Errored(err)) + return + } + + defer r.Body.Close() + limitedReader := &io.LimitedReader{R: r.Body, N: maxRequestSize} + body, err := io.ReadAll(limitedReader) + if err != nil { + wh.getLogger(nil).Error(err, "unable to read the body from the incoming request") + wh.writeResponse(w, Errored(err)) + return + } + if limitedReader.N <= 0 { + err := fmt.Errorf("request entity is too large; limit is %d bytes", maxRequestSize) + wh.getLogger(nil).Error(err, "unable to read the body from the incoming request; limit reached") + wh.writeResponse(w, Errored(err)) + return + } + + // verify the content type is accurate + if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { + err := fmt.Errorf("contentType=%s, expected application/json", contentType) + wh.getLogger(nil).Error(err, "unable to process a request with unknown content type") + wh.writeResponse(w, Errored(err)) + return + } + + // Both v1 and v1beta1 TokenReview types are exactly the same, so the v1beta1 type can + // be decoded into the v1 type. The v1beta1 api is deprecated as of 1.19 and will be + // removed in authenticationv1.22. However the runtime codec's decoder guesses which type to + // decode into by type name if an Object's TypeMeta isn't set. By setting TypeMeta of an + // unregistered type to the v1 GVK, the decoder will coerce a v1beta1 TokenReview to authenticationv1. 
+ // The actual TokenReview GVK will be used to write a typed response in case the + // webhook config permits multiple versions, otherwise this response will fail. + req := Request{} + ar := unversionedTokenReview{} + // avoid an extra copy + ar.TokenReview = &req.TokenReview + ar.SetGroupVersionKind(authenticationv1.SchemeGroupVersion.WithKind("TokenReview")) + _, actualTokRevGVK, err := authenticationCodecs.UniversalDeserializer().Decode(body, nil, &ar) + if err != nil { + wh.getLogger(nil).Error(err, "unable to decode the request") + wh.writeResponse(w, Errored(err)) + return + } + wh.getLogger(&req).V(5).Info("received request") + + if req.Spec.Token == "" { + err := errors.New("token is empty") + wh.getLogger(&req).Error(err, "bad request") + wh.writeResponse(w, Errored(err)) + return + } + + wh.writeResponseTyped(w, wh.Handle(ctx, req), actualTokRevGVK) +} + +// writeResponse writes response to w generically, i.e. without encoding GVK information. +func (wh *Webhook) writeResponse(w io.Writer, response Response) { + wh.writeTokenResponse(w, response.TokenReview) +} + +// writeResponseTyped writes response to w with GVK set to tokRevGVK, which is necessary +// if multiple TokenReview versions are permitted by the webhook. +func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, tokRevGVK *schema.GroupVersionKind) { + ar := response.TokenReview + + // Default to a v1 TokenReview, otherwise the API server may not recognize the request + // if multiple TokenReview versions are permitted by the webhook config. + if tokRevGVK == nil || *tokRevGVK == (schema.GroupVersionKind{}) { + ar.SetGroupVersionKind(authenticationv1.SchemeGroupVersion.WithKind("TokenReview")) + } else { + ar.SetGroupVersionKind(*tokRevGVK) + } + wh.writeTokenResponse(w, ar) +} + +// writeTokenResponse writes ar to w. 
+func (wh *Webhook) writeTokenResponse(w io.Writer, ar authenticationv1.TokenReview) { + if err := json.NewEncoder(w).Encode(ar); err != nil { + wh.getLogger(nil).Error(err, "unable to encode the response") + wh.writeResponse(w, Errored(err)) + } + res := ar + wh.getLogger(nil).V(5).Info("wrote response", "requestID", res.UID, "authenticated", res.Status.Authenticated) +} + +// unversionedTokenReview is used to decode both v1 and v1beta1 TokenReview types. +type unversionedTokenReview struct { + *authenticationv1.TokenReview +} + +var _ runtime.Object = &unversionedTokenReview{} diff --git a/pkg/webhook/authentication/http_test.go b/pkg/webhook/authentication/http_test.go new file mode 100644 index 0000000000..e51b2af7e6 --- /dev/null +++ b/pkg/webhook/authentication/http_test.go @@ -0,0 +1,235 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "io" + "net/http" + "net/http/httptest" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + authenticationv1 "k8s.io/api/authentication/v1" +) + +var _ = Describe("Authentication Webhooks", func() { + + const ( + gvkJSONv1 = `"kind":"TokenReview","apiVersion":"authentication.k8s.io/v1"` + ) + + Describe("HTTP Handler", func() { + var respRecorder *httptest.ResponseRecorder + webhook := &Webhook{ + Handler: nil, + } + BeforeEach(func() { + respRecorder = &httptest.ResponseRecorder{ + Body: bytes.NewBuffer(nil), + } + }) + + It("should return bad-request when given an empty body", func() { + req := &http.Request{Body: nil} + + expected := `{"metadata":{},"spec":{},"status":{"user":{},"error":"request body is empty"}} +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(BeComparableTo(expected)) + }) + + It("should return bad-request when given the wrong content-type", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/foo"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: bytes.NewBuffer(nil)}, + } + + expected := `{"metadata":{},"spec":{},"status":{"user":{},"error":"contentType=application/foo, expected application/json"}} +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return bad-request when given an undecodable body", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: bytes.NewBufferString("{")}, + } + + expected := `{"metadata":{},"spec":{},"status":{"user":{},"error":"couldn't get version/kind; json parse error: unexpected end of JSON input"}} +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return bad-request when given an undecodable body", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: 
bytes.NewBufferString(`{"spec":{"token":""}}`)}, + } + + expected := `{"metadata":{},"spec":{},"status":{"user":{},"error":"token is empty"}} +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should error when given a NoBody", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: http.NoBody, + } + + expected := `{"metadata":{},"spec":{},"status":{"user":{},"error":"request body is empty"}} +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should error when given an infinite body", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: rand.Reader}, + } + + expected := `{"metadata":{},"spec":{},"status":{"user":{},"error":"request entity is too large; limit is 1048576 bytes"}} +` + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return the response given by the handler with version defaulted to v1", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: bytes.NewBufferString(`{"spec":{"token":"foobar"}}`)}, + } + webhook := &Webhook{ + Handler: &fakeHandler{}, + } + + expected := fmt.Sprintf(`{%s,"metadata":{},"spec":{},"status":{"authenticated":true,"user":{}}} +`, gvkJSONv1) + + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should return the v1 response given by the handler", func() { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: bytes.NewBufferString(fmt.Sprintf(`{%s,"spec":{"token":"foobar"}}`, gvkJSONv1))}, + } + webhook := &Webhook{ + 
Handler: &fakeHandler{}, + } + + expected := fmt.Sprintf(`{%s,"metadata":{},"spec":{},"status":{"authenticated":true,"user":{}}} +`, gvkJSONv1) + webhook.ServeHTTP(respRecorder, req) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should present the Context from the HTTP request, if any", func(specContext SpecContext) { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: bytes.NewBufferString(`{"spec":{"token":"foobar"}}`)}, + } + type ctxkey int + const key ctxkey = 1 + const value = "from-ctx" + webhook := &Webhook{ + Handler: &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + <-ctx.Done() + return Authenticated(ctx.Value(key).(string), authenticationv1.UserInfo{}) + }, + }, + } + + expected := fmt.Sprintf(`{%s,"metadata":{},"spec":{},"status":{"authenticated":true,"user":{},"error":%q}} +`, gvkJSONv1, value) + + ctx, cancel := context.WithCancel(context.WithValue(specContext, key, value)) + cancel() + webhook.ServeHTTP(respRecorder, req.WithContext(ctx)) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + + It("should mutate the Context from the HTTP request, if func supplied", func(specContext SpecContext) { + req := &http.Request{ + Header: http.Header{"Content-Type": []string{"application/json"}}, + Method: http.MethodPost, + Body: nopCloser{Reader: bytes.NewBufferString(`{"spec":{"token":"foobar"}}`)}, + } + type ctxkey int + const key ctxkey = 1 + webhook := &Webhook{ + Handler: &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + return Authenticated(ctx.Value(key).(string), authenticationv1.UserInfo{}) + }, + }, + WithContextFunc: func(ctx context.Context, r *http.Request) context.Context { + return context.WithValue(ctx, key, r.Header["Content-Type"][0]) + }, + } + + expected := fmt.Sprintf(`{%s,"metadata":{},"spec":{},"status":{"authenticated":true,"user":{},"error":%q}} +`, gvkJSONv1, 
"application/json") + + ctx, cancel := context.WithCancel(specContext) + cancel() + webhook.ServeHTTP(respRecorder, req.WithContext(ctx)) + Expect(respRecorder.Body.String()).To(Equal(expected)) + }) + }) +}) + +type nopCloser struct { + io.Reader +} + +func (nopCloser) Close() error { return nil } + +type fakeHandler struct { + invoked bool + fn func(context.Context, Request) Response +} + +func (h *fakeHandler) Handle(ctx context.Context, req Request) Response { + h.invoked = true + if h.fn != nil { + return h.fn(ctx, req) + } + return Response{TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + }, + }} +} diff --git a/pkg/webhook/authentication/response.go b/pkg/webhook/authentication/response.go new file mode 100644 index 0000000000..3e1d362049 --- /dev/null +++ b/pkg/webhook/authentication/response.go @@ -0,0 +1,63 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + authenticationv1 "k8s.io/api/authentication/v1" +) + +// Authenticated constructs a response indicating that the given token +// is valid. +func Authenticated(reason string, user authenticationv1.UserInfo) Response { + return ReviewResponse(true, user, reason) +} + +// Unauthenticated constructs a response indicating that the given token +// is not valid. 
+func Unauthenticated(reason string, user authenticationv1.UserInfo) Response { + return ReviewResponse(false, authenticationv1.UserInfo{}, reason) +} + +// Errored creates a new Response for error-handling a request. +func Errored(err error) Response { + return Response{ + TokenReview: authenticationv1.TokenReview{ + Spec: authenticationv1.TokenReviewSpec{}, + Status: authenticationv1.TokenReviewStatus{ + Authenticated: false, + Error: err.Error(), + }, + }, + } +} + +// ReviewResponse returns a response for admitting a request. +func ReviewResponse(authenticated bool, user authenticationv1.UserInfo, err string, audiences ...string) Response { + resp := Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: authenticated, + User: user, + Audiences: audiences, + }, + }, + } + if len(err) > 0 { + resp.TokenReview.Status.Error = err + } + return resp +} diff --git a/pkg/webhook/authentication/response_test.go b/pkg/webhook/authentication/response_test.go new file mode 100644 index 0000000000..6eeef87c11 --- /dev/null +++ b/pkg/webhook/authentication/response_test.go @@ -0,0 +1,160 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "errors" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + authenticationv1 "k8s.io/api/authentication/v1" +) + +var _ = Describe("Authentication Webhook Response Helpers", func() { + Describe("Authenticated", func() { + It("should return an 'allowed' response", func() { + Expect(Authenticated("", authenticationv1.UserInfo{})).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + User: authenticationv1.UserInfo{}, + }, + }, + }, + )) + }) + + It("should populate a status with a reason when a reason is given", func() { + Expect(Authenticated("acceptable", authenticationv1.UserInfo{})).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + User: authenticationv1.UserInfo{}, + Error: "acceptable", + }, + }, + }, + )) + }) + }) + + Describe("Unauthenticated", func() { + It("should return a 'not allowed' response", func() { + Expect(Unauthenticated("", authenticationv1.UserInfo{})).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: false, + User: authenticationv1.UserInfo{}, + Error: "", + }, + }, + }, + )) + }) + + It("should populate a status with a reason when a reason is given", func() { + Expect(Unauthenticated("UNACCEPTABLE!", authenticationv1.UserInfo{})).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: false, + User: authenticationv1.UserInfo{}, + Error: "UNACCEPTABLE!", + }, + }, + }, + )) + }) + }) + + Describe("Errored", func() { + It("should return a unauthenticated response with an error", func() { + err := errors.New("this is an error") + expected := Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: false, + User: authenticationv1.UserInfo{}, + Error: err.Error(), + }, + }, + } + resp 
:= Errored(err) + Expect(resp).To(Equal(expected)) + }) + }) + + Describe("ReviewResponse", func() { + It("should populate a status with a Error when a reason is given", func() { + By("checking that a message is populated for 'allowed' responses") + Expect(ReviewResponse(true, authenticationv1.UserInfo{}, "acceptable")).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + User: authenticationv1.UserInfo{}, + Error: "acceptable", + }, + }, + }, + )) + + By("checking that a message is populated for 'Unauthenticated' responses") + Expect(ReviewResponse(false, authenticationv1.UserInfo{}, "UNACCEPTABLE!")).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: false, + User: authenticationv1.UserInfo{}, + Error: "UNACCEPTABLE!", + }, + }, + }, + )) + }) + + It("should return an authentication decision", func() { + By("checking that it returns an 'allowed' response when allowed is true") + Expect(ReviewResponse(true, authenticationv1.UserInfo{}, "")).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + User: authenticationv1.UserInfo{}, + }, + }, + }, + )) + + By("checking that it returns an 'Unauthenticated' response when allowed is false") + Expect(ReviewResponse(false, authenticationv1.UserInfo{}, "")).To(Equal( + Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: false, + User: authenticationv1.UserInfo{}, + }, + }, + }, + )) + }) + }) +}) diff --git a/pkg/webhook/authentication/webhook.go b/pkg/webhook/authentication/webhook.go new file mode 100644 index 0000000000..5a0cd4cd25 --- /dev/null +++ b/pkg/webhook/authentication/webhook.go @@ -0,0 +1,126 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "context" + "errors" + "net/http" + "sync" + + "github.com/go-logr/logr" + authenticationv1 "k8s.io/api/authentication/v1" + "k8s.io/klog/v2" + + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var ( + errUnableToEncodeResponse = errors.New("unable to encode response") +) + +// Request defines the input for an authentication handler. +// It contains information to identify the object in +// question (group, version, kind, resource, subresource, +// name, namespace), as well as the operation in question +// (e.g. Get, Create, etc), and the object itself. +type Request struct { + authenticationv1.TokenReview +} + +// Response is the output of an authentication handler. +// It contains a response indicating if a given +// operation is allowed. +type Response struct { + authenticationv1.TokenReview +} + +// Complete populates any fields that are yet to be set in +// the underlying TokenResponse, It mutates the response. +func (r *Response) Complete(req Request) error { + r.UID = req.UID + + return nil +} + +// Handler can handle an TokenReview. +type Handler interface { + // Handle yields a response to an TokenReview. + // + // The supplied context is extracted from the received http.Request, allowing wrapping + // http.Handlers to inject values into and control cancelation of downstream request processing. 
+ Handle(context.Context, Request) Response +} + +// HandlerFunc implements Handler interface using a single function. +type HandlerFunc func(context.Context, Request) Response + +var _ Handler = HandlerFunc(nil) + +// Handle process the TokenReview by invoking the underlying function. +func (f HandlerFunc) Handle(ctx context.Context, req Request) Response { + return f(ctx, req) +} + +// Webhook represents each individual webhook. +type Webhook struct { + // Handler actually processes an authentication request returning whether it was authenticated or unauthenticated, + // and potentially patches to apply to the handler. + Handler Handler + + // WithContextFunc will allow you to take the http.Request.Context() and + // add any additional information such as passing the request path or + // headers thus allowing you to read them from within the handler + WithContextFunc func(context.Context, *http.Request) context.Context + + setupLogOnce sync.Once + log logr.Logger +} + +// Handle processes TokenReview. +func (wh *Webhook) Handle(ctx context.Context, req Request) Response { + resp := wh.Handler.Handle(ctx, req) + if err := resp.Complete(req); err != nil { + wh.getLogger(&req).Error(err, "unable to encode response") + return Errored(errUnableToEncodeResponse) + } + + return resp +} + +// getLogger constructs a logger from the injected log and LogConstructor. +func (wh *Webhook) getLogger(req *Request) logr.Logger { + wh.setupLogOnce.Do(func() { + if wh.log.GetSink() == nil { + wh.log = logf.Log.WithName("authentication") + } + }) + + return logConstructor(wh.log, req) +} + +// logConstructor adds some commonly interesting fields to the given logger. 
+func logConstructor(base logr.Logger, req *Request) logr.Logger { + if req != nil { + return base.WithValues("object", klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + "user", req.Status.User.Username, + "requestID", req.UID, + ) + } + return base +} diff --git a/pkg/webhook/authentication/webhook_test.go b/pkg/webhook/authentication/webhook_test.go new file mode 100644 index 0000000000..22c4e284cd --- /dev/null +++ b/pkg/webhook/authentication/webhook_test.go @@ -0,0 +1,106 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + machinerytypes "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("Authentication Webhooks", func() { + allowHandler := func() *Webhook { + handler := &fakeHandler{ + fn: func(ctx context.Context, req Request) Response { + return Response{ + TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + }, + }, + } + }, + } + webhook := &Webhook{ + Handler: handler, + } + + return webhook + } + + It("should invoke the handler to get a response", func(ctx SpecContext) { + By("setting up a webhook with an allow handler") + webhook := allowHandler() + + By("invoking the webhook") + resp := webhook.Handle(ctx, Request{}) + + By("checking that it allowed the request") + Expect(resp.Status.Authenticated).To(BeTrue()) + }) + + It("should ensure that the response's UID is set to the request's UID", func(ctx SpecContext) { + By("setting up a webhook") + webhook := allowHandler() + + By("invoking the webhook") + resp := webhook.Handle(ctx, Request{TokenReview: authenticationv1.TokenReview{ObjectMeta: metav1.ObjectMeta{UID: "foobar"}}}) + + By("checking that the response share's the request's UID") + Expect(resp.UID).To(Equal(machinerytypes.UID("foobar"))) + }) + + It("should populate the status on a response if one is not provided", func(ctx SpecContext) { + By("setting up a webhook") + webhook := allowHandler() + + By("invoking the webhook") + resp := webhook.Handle(ctx, Request{}) + + By("checking that the response share's the request's UID") + Expect(resp.Status).To(Equal(authenticationv1.TokenReviewStatus{Authenticated: true})) + }) + + It("shouldn't overwrite the status on a response", func(ctx SpecContext) { + By("setting up a webhook that sets a status") + webhook := &Webhook{ + Handler: HandlerFunc(func(ctx context.Context, req Request) Response { + return Response{ + 
TokenReview: authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + Error: "Ground Control to Major Tom", + }, + }, + } + }), + } + + By("invoking the webhook") + resp := webhook.Handle(ctx, Request{}) + + By("checking that the message is intact") + Expect(resp.Status).NotTo(BeNil()) + Expect(resp.Status.Authenticated).To(BeTrue()) + Expect(resp.Status.Error).To(Equal("Ground Control to Major Tom")) + }) +}) diff --git a/pkg/webhook/conversion/conversion.go b/pkg/webhook/conversion/conversion.go index 7a74a14892..a26fa348bb 100644 --- a/pkg/webhook/conversion/conversion.go +++ b/pkg/webhook/conversion/conversion.go @@ -22,45 +22,43 @@ See pkg/conversion for interface definitions required to ensure an API Type is c package conversion import ( + "context" "encoding/json" + "errors" "fmt" "net/http" - apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "sigs.k8s.io/controller-runtime/pkg/conversion" logf "sigs.k8s.io/controller-runtime/pkg/log" + conversionmetrics "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/metrics" ) var ( log = logf.Log.WithName("conversion-webhook") ) -// Webhook implements a CRD conversion webhook HTTP handler. -type Webhook struct { - scheme *runtime.Scheme - decoder *Decoder +func NewWebhookHandler(scheme *runtime.Scheme) http.Handler { + return &webhook{scheme: scheme, decoder: NewDecoder(scheme)} } -// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. 
-func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { - var err error - wh.scheme = s - wh.decoder, err = NewDecoder(s) - if err != nil { - return err - } - - return nil +// webhook implements a CRD conversion webhook HTTP handler. +type webhook struct { + scheme *runtime.Scheme + decoder *Decoder } // ensure Webhook implements http.Handler -var _ http.Handler = &Webhook{} +var _ http.Handler = &webhook{} + +func (wh *webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() -func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { convertReview := &apix.ConversionReview{} err := json.NewDecoder(r.Body).Decode(convertReview) if err != nil { @@ -69,9 +67,15 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } + if convertReview.Request == nil { + log.Error(nil, "conversion request is nil") + w.WriteHeader(http.StatusBadRequest) + return + } + // TODO(droot): may be move the conversion logic to a separate module to // decouple it from the http layer ? - resp, err := wh.handleConvertRequest(convertReview.Request) + resp, err := wh.handleConvertRequest(ctx, convertReview.Request) if err != nil { log.Error(err, "failed to convert", "request", convertReview.Request.UID) convertReview.Response = errored(err) @@ -89,7 +93,18 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { } // handles a version conversion request. 
-func (wh *Webhook) handleConvertRequest(req *apix.ConversionRequest) (*apix.ConversionResponse, error) { +func (wh *webhook) handleConvertRequest(ctx context.Context, req *apix.ConversionRequest) (_ *apix.ConversionResponse, retErr error) { + defer func() { + if r := recover(); r != nil { + conversionmetrics.WebhookPanics.WithLabelValues().Inc() + + for _, fn := range utilruntime.PanicHandlers { + fn(ctx, r) + } + retErr = errors.New("internal error occurred during conversion") + return + } + }() if req == nil { return nil, fmt.Errorf("conversion request is nil") } @@ -122,7 +137,7 @@ func (wh *Webhook) handleConvertRequest(req *apix.ConversionRequest) (*apix.Conv // convertObject will convert given a src object to dst object. // Note(droot): couldn't find a way to reduce the cyclomatic complexity under 10 // without compromising readability, so disabling gocyclo linter -func (wh *Webhook) convertObject(src, dst runtime.Object) error { +func (wh *webhook) convertObject(src, dst runtime.Object) error { srcGVK := src.GetObjectKind().GroupVersionKind() dstGVK := dst.GetObjectKind().GroupVersionKind() @@ -149,7 +164,7 @@ func (wh *Webhook) convertObject(src, dst runtime.Object) error { } } -func (wh *Webhook) convertViaHub(src, dst conversion.Convertible) error { +func (wh *webhook) convertViaHub(src, dst conversion.Convertible) error { hub, err := wh.getHub(src) if err != nil { return err @@ -161,43 +176,47 @@ func (wh *Webhook) convertViaHub(src, dst conversion.Convertible) error { err = src.ConvertTo(hub) if err != nil { - return fmt.Errorf("%T failed to convert to hub version %T : %v", src, hub, err) + return fmt.Errorf("%T failed to convert to hub version %T : %w", src, hub, err) } err = dst.ConvertFrom(hub) if err != nil { - return fmt.Errorf("%T failed to convert from hub version %T : %v", dst, hub, err) + return fmt.Errorf("%T failed to convert from hub version %T : %w", dst, hub, err) } return nil } // getHub returns an instance of the Hub for passed-in 
object's group/kind. -func (wh *Webhook) getHub(obj runtime.Object) (conversion.Hub, error) { - gvks, _, err := wh.scheme.ObjectKinds(obj) +func (wh *webhook) getHub(obj runtime.Object) (conversion.Hub, error) { + gvks, err := objectGVKs(wh.scheme, obj) if err != nil { - return nil, fmt.Errorf("error retriving object kinds for given object : %v", err) + return nil, err + } + if len(gvks) == 0 { + return nil, fmt.Errorf("error retrieving gvks for object : %v", obj) } var hub conversion.Hub - var isHub, hubFoundAlready bool + var hubFoundAlready bool for _, gvk := range gvks { instance, err := wh.scheme.New(gvk) if err != nil { - return nil, fmt.Errorf("failed to allocate an instance for gvk %v %v", gvk, err) + return nil, fmt.Errorf("failed to allocate an instance for gvk %v: %w", gvk, err) } - if hub, isHub = instance.(conversion.Hub); isHub { + if val, isHub := instance.(conversion.Hub); isHub { if hubFoundAlready { return nil, fmt.Errorf("multiple hub version defined for %T", obj) } hubFoundAlready = true + hub = val } } return hub, nil } // allocateDstObject returns an instance for a given GVK. -func (wh *Webhook) allocateDstObject(apiVersion, kind string) (runtime.Object, error) { +func (wh *webhook) allocateDstObject(apiVersion, kind string) (runtime.Object, error) { gvk := schema.FromAPIVersionAndKind(apiVersion, kind) obj, err := wh.scheme.New(gvk) @@ -216,21 +235,24 @@ func (wh *Webhook) allocateDstObject(apiVersion, kind string) (runtime.Object, e return obj, nil } -// CheckConvertibility determines if given type is convertible or not. For a type +// IsConvertible determines if given type is convertible or not. For a type // to be convertible, the group-kind needs to have a Hub type defined and all // non-hub types must be able to convert to/from Hub. 
-func CheckConvertibility(scheme *runtime.Scheme, obj runtime.Object) error { +func IsConvertible(scheme *runtime.Scheme, obj runtime.Object) (bool, error) { var hubs, spokes, nonSpokes []runtime.Object - gvks, _, err := scheme.ObjectKinds(obj) + gvks, err := objectGVKs(scheme, obj) if err != nil { - return fmt.Errorf("error retriving object kinds for given object : %v", err) + return false, err + } + if len(gvks) == 0 { + return false, fmt.Errorf("error retrieving gvks for object : %v", obj) } for _, gvk := range gvks { instance, err := scheme.New(gvk) if err != nil { - return fmt.Errorf("failed to allocate an instance for gvk %v %v", gvk, err) + return false, fmt.Errorf("failed to allocate an instance for gvk %v: %w", gvk, err) } if isHub(instance) { @@ -247,32 +269,50 @@ func CheckConvertibility(scheme *runtime.Scheme, obj runtime.Object) error { } if len(gvks) == 1 { - return nil // single version + return false, nil // single version } if len(hubs) == 0 && len(spokes) == 0 { // multiple version detected with no conversion implementation. This is // true for multi-version built-in types. - return nil + return false, nil } if len(hubs) == 1 && len(nonSpokes) == 0 { // convertible - spokeVersions := []string{} - for _, sp := range spokes { - spokeVersions = append(spokeVersions, sp.GetObjectKind().GroupVersionKind().String()) - } - log.V(1).Info("conversion enabled for kind", "kind", - gvks[0].GroupKind(), "hub", hubs[0], "spokes", spokeVersions) - return nil + return true, nil } - return PartialImplementationError{ + return false, PartialImplementationError{ hubs: hubs, nonSpokes: nonSpokes, spokes: spokes, } } +// objectGVKs returns all (Group,Version,Kind) for the Group/Kind of given object. 
+func objectGVKs(scheme *runtime.Scheme, obj runtime.Object) ([]schema.GroupVersionKind, error) { + // NB: we should not use `obj.GetObjectKind().GroupVersionKind()` to get the + // GVK here, since it is parsed from apiVersion and kind fields and it may + // return empty GVK if obj is an uninitialized object. + objGVKs, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + if len(objGVKs) != 1 { + return nil, fmt.Errorf("expect to get only one GVK for %v", obj) + } + objGVK := objGVKs[0] + knownTypes := scheme.AllKnownTypes() + + var gvks []schema.GroupVersionKind + for gvk := range knownTypes { + if objGVK.GroupKind() == gvk.GroupKind() { + gvks = append(gvks, gvk) + } + } + return gvks, nil +} + // PartialImplementationError represents an error due to partial conversion // implementation such as hub without spokes, multiple hubs or spokes without hub. type PartialImplementationError struct { diff --git a/pkg/webhook/conversion/conversion_suite_test.go b/pkg/webhook/conversion/conversion_suite_test.go index def495c1fc..7ca3c48ba2 100644 --- a/pkg/webhook/conversion/conversion_suite_test.go +++ b/pkg/webhook/conversion/conversion_suite_test.go @@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -18,21 +18,18 @@ package conversion import ( "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) func TestConversionWebhook(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "CRD conversion Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "CRD conversion Suite") } -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) - - close(done) +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) }) diff --git a/pkg/webhook/conversion/conversion_test.go b/pkg/webhook/conversion/conversion_test.go index 6b08a1ae03..489689bccb 100644 --- a/pkg/webhook/conversion/conversion_test.go +++ b/pkg/webhook/conversion/conversion_test.go @@ -14,49 +14,51 @@ See the License for the specific language governing permissions and limitations under the License. */ -package conversion +package conversion_test import ( "bytes" "encoding/json" - "io/ioutil" + "io" "net/http" "net/http/httptest" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" appsv1beta1 "k8s.io/api/apps/v1beta1" - apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" kscheme "k8s.io/client-go/kubernetes/scheme" - jobsapis "sigs.k8s.io/controller-runtime/examples/conversion/pkg/apis" - jobsv1 "sigs.k8s.io/controller-runtime/examples/conversion/pkg/apis/jobs/v1" - jobsv2 "sigs.k8s.io/controller-runtime/examples/conversion/pkg/apis/jobs/v2" + "sigs.k8s.io/controller-runtime/pkg/webhook/conversion" + jobsv1 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v1" + jobsv2 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v2" + jobsv3 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v3" ) var _ = Describe("Conversion Webhook", func() { var respRecorder *httptest.ResponseRecorder - var decoder *Decoder + var decoder *conversion.Decoder var scheme *runtime.Scheme - webhook := Webhook{} + var wh http.Handler BeforeEach(func() { respRecorder = &httptest.ResponseRecorder{ Body: bytes.NewBuffer(nil), } - scheme = kscheme.Scheme - Expect(jobsapis.AddToScheme(scheme)).To(Succeed()) - Expect(webhook.InjectScheme(scheme)).To(Succeed()) - - var err error - decoder, err = NewDecoder(scheme) - Expect(err).NotTo(HaveOccurred()) + scheme = runtime.NewScheme() + Expect(kscheme.AddToScheme(scheme)).To(Succeed()) + Expect(jobsv1.AddToScheme(scheme)).To(Succeed()) + Expect(jobsv2.AddToScheme(scheme)).To(Succeed()) + Expect(jobsv3.AddToScheme(scheme)).To(Succeed()) + decoder = conversion.NewDecoder(scheme) + wh = conversion.NewWebhookHandler(scheme) }) doRequest := func(convReq *apix.ConversionReview) *apix.ConversionReview { @@ -66,9 +68,9 @@ var _ = Describe("Conversion Webhook", func() { convReview := &apix.ConversionReview{} req := &http.Request{ - Body: 
ioutil.NopCloser(bytes.NewReader(payload.Bytes())), + Body: io.NopCloser(bytes.NewReader(payload.Bytes())), } - webhook.ServeHTTP(respRecorder, req) + wh.ServeHTTP(respRecorder, req) Expect(json.NewDecoder(respRecorder.Result().Body).Decode(convReview)).To(Succeed()) return convReview } @@ -77,7 +79,7 @@ var _ = Describe("Conversion Webhook", func() { return &jobsv1.ExternalJob{ TypeMeta: metav1.TypeMeta{ Kind: "ExternalJob", - APIVersion: "jobs.example.org/v1", + APIVersion: "jobs.testprojects.kb.io/v1", }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", @@ -89,14 +91,30 @@ var _ = Describe("Conversion Webhook", func() { } } - It("should convert objects successfully", func() { + makeV2Obj := func() *jobsv2.ExternalJob { + return &jobsv2.ExternalJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "ExternalJob", + APIVersion: "jobs.testprojects.kb.io/v2", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "obj-1", + }, + Spec: jobsv2.ExternalJobSpec{ + ScheduleAt: "every 2 seconds", + }, + } + } + + It("should convert spoke to hub successfully", func() { v1Obj := makeV1Obj() expected := &jobsv2.ExternalJob{ TypeMeta: metav1.TypeMeta{ Kind: "ExternalJob", - APIVersion: "jobs.example.org/v2", + APIVersion: "jobs.testprojects.kb.io/v2", }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", @@ -110,7 +128,85 @@ var _ = Describe("Conversion Webhook", func() { convReq := &apix.ConversionReview{ TypeMeta: metav1.TypeMeta{}, Request: &apix.ConversionRequest{ - DesiredAPIVersion: "jobs.example.org/v2", + DesiredAPIVersion: "jobs.testprojects.kb.io/v2", + Objects: []runtime.RawExtension{ + { + Object: v1Obj, + }, + }, + }, + } + + convReview := doRequest(convReq) + + Expect(convReview.Response.ConvertedObjects).To(HaveLen(1)) + Expect(convReview.Response.Result.Status).To(Equal(metav1.StatusSuccess)) + got, _, err := decoder.Decode(convReview.Response.ConvertedObjects[0].Raw) + Expect(err).NotTo(HaveOccurred()) + Expect(got).To(Equal(expected)) + }) + + 
It("should convert hub to spoke successfully", func() { + + v2Obj := makeV2Obj() + + expected := &jobsv1.ExternalJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "ExternalJob", + APIVersion: "jobs.testprojects.kb.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "obj-1", + }, + Spec: jobsv1.ExternalJobSpec{ + RunAt: "every 2 seconds", + }, + } + + convReq := &apix.ConversionReview{ + TypeMeta: metav1.TypeMeta{}, + Request: &apix.ConversionRequest{ + DesiredAPIVersion: "jobs.testprojects.kb.io/v1", + Objects: []runtime.RawExtension{ + { + Object: v2Obj, + }, + }, + }, + } + + convReview := doRequest(convReq) + + Expect(convReview.Response.ConvertedObjects).To(HaveLen(1)) + Expect(convReview.Response.Result.Status).To(Equal(metav1.StatusSuccess)) + got, _, err := decoder.Decode(convReview.Response.ConvertedObjects[0].Raw) + Expect(err).NotTo(HaveOccurred()) + Expect(got).To(Equal(expected)) + }) + + It("should convert spoke to spoke successfully", func() { + + v1Obj := makeV1Obj() + + expected := &jobsv3.ExternalJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "ExternalJob", + APIVersion: "jobs.testprojects.kb.io/v3", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "obj-1", + }, + Spec: jobsv3.ExternalJobSpec{ + DeferredAt: "every 2 seconds", + }, + } + + convReq := &apix.ConversionReview{ + TypeMeta: metav1.TypeMeta{}, + Request: &apix.ConversionRequest{ + DesiredAPIVersion: "jobs.testprojects.kb.io/v3", Objects: []runtime.RawExtension{ { Object: v1Obj, @@ -156,7 +252,7 @@ var _ = Describe("Conversion Webhook", func() { convReq := &apix.ConversionReview{ TypeMeta: metav1.TypeMeta{}, Request: &apix.ConversionRequest{ - DesiredAPIVersion: "jobs.example.org/v1", + DesiredAPIVersion: "jobs.testprojects.kb.io/v1", Objects: []runtime.RawExtension{ { Object: v1Obj, @@ -200,32 +296,79 @@ var _ = Describe("Conversion Webhook", func() { Expect(convReview.Response.ConvertedObjects).To(BeEmpty()) }) + It("should return error on panic in 
conversion", func() { + + v1Obj := makeV1Obj() + v1Obj.Spec.PanicInConversion = true + + convReq := &apix.ConversionReview{ + TypeMeta: metav1.TypeMeta{}, + Request: &apix.ConversionRequest{ + DesiredAPIVersion: "jobs.testprojects.kb.io/v3", + Objects: []runtime.RawExtension{ + { + Object: v1Obj, + }, + }, + }, + } + + convReview := doRequest(convReq) + + Expect(convReview.Response.ConvertedObjects).To(HaveLen(0)) + Expect(convReview.Response.Result.Status).To(Equal(metav1.StatusFailure)) + Expect(convReview.Response.Result.Message).To(Equal("internal error occurred during conversion")) + }) }) -var _ = Describe("Convertibility Check", func() { +var _ = Describe("IsConvertible", func() { var scheme *runtime.Scheme BeforeEach(func() { + scheme = runtime.NewScheme() - scheme = kscheme.Scheme - Expect(jobsapis.AddToScheme(scheme)).To(Succeed()) + Expect(kscheme.AddToScheme(scheme)).To(Succeed()) + Expect(jobsv1.AddToScheme(scheme)).To(Succeed()) + Expect(jobsv2.AddToScheme(scheme)).To(Succeed()) + Expect(jobsv3.AddToScheme(scheme)).To(Succeed()) + }) + + It("should not error for uninitialized types", func() { + obj := &jobsv2.ExternalJob{} + ok, err := conversion.IsConvertible(scheme, obj) + Expect(err).NotTo(HaveOccurred()) + Expect(ok).To(BeTrue()) + }) + + It("should not error for unstructured types", func() { + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "ExternalJob", + "apiVersion": "jobs.testprojects.kb.io/v2", + }, + } + + ok, err := conversion.IsConvertible(scheme, obj) + Expect(err).NotTo(HaveOccurred()) + Expect(ok).To(BeTrue()) }) - It("should not return error for convertible types", func() { + It("should return true for convertible types", func() { obj := &jobsv2.ExternalJob{ TypeMeta: metav1.TypeMeta{ Kind: "ExternalJob", - APIVersion: "jobs.example.org/v2", + APIVersion: "jobs.testprojects.kb.io/v2", }, } - err := CheckConvertibility(scheme, obj) + ok, err := conversion.IsConvertible(scheme, obj) 
Expect(err).NotTo(HaveOccurred()) + Expect(ok).To(BeTrue()) }) - It("should not return error for a built-in multi-version type", func() { + It("should return false for a non convertible type", func() { obj := &appsv1beta1.Deployment{ TypeMeta: metav1.TypeMeta{ Kind: "Deployment", @@ -233,7 +376,8 @@ var _ = Describe("Convertibility Check", func() { }, } - err := CheckConvertibility(scheme, obj) + ok, err := conversion.IsConvertible(scheme, obj) Expect(err).NotTo(HaveOccurred()) + Expect(ok).ToNot(BeTrue()) }) }) diff --git a/pkg/webhook/conversion/decoder.go b/pkg/webhook/conversion/decoder.go index 8a145cd978..b6bb8bd938 100644 --- a/pkg/webhook/conversion/decoder.go +++ b/pkg/webhook/conversion/decoder.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package conversion import ( @@ -14,8 +30,11 @@ type Decoder struct { } // NewDecoder creates a Decoder given the runtime.Scheme -func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { - return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +func NewDecoder(scheme *runtime.Scheme) *Decoder { + if scheme == nil { + panic("scheme should never be nil") + } + return &Decoder{codecs: serializer.NewCodecFactory(scheme)} } // Decode decodes the inlined object. 
diff --git a/pkg/webhook/conversion/metrics/metrics.go b/pkg/webhook/conversion/metrics/metrics.go new file mode 100644 index 0000000000..c825f17f0b --- /dev/null +++ b/pkg/webhook/conversion/metrics/metrics.go @@ -0,0 +1,39 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // WebhookPanics is a prometheus counter metrics which holds the total + // number of panics from conversion webhooks. + WebhookPanics = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_conversion_webhook_panics_total", + Help: "Total number of conversion webhook panics", + }, []string{}) +) + +func init() { + metrics.Registry.MustRegister( + WebhookPanics, + ) + // Init metric. 
+ WebhookPanics.WithLabelValues().Add(0) +} diff --git a/pkg/webhook/conversion/testdata/.gitignore b/pkg/webhook/conversion/testdata/.gitignore new file mode 100644 index 0000000000..d97ffc5159 --- /dev/null +++ b/pkg/webhook/conversion/testdata/.gitignore @@ -0,0 +1,24 @@ + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Kubernetes Generated files - skip generated files, except for vendored files + +!vendor/**/zz_generated.* + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ diff --git a/pkg/webhook/conversion/testdata/Makefile b/pkg/webhook/conversion/testdata/Makefile new file mode 100644 index 0000000000..2d9d3dda15 --- /dev/null +++ b/pkg/webhook/conversion/testdata/Makefile @@ -0,0 +1,64 @@ + +# Image URL to use all building/pushing image targets +IMG ?= controller:latest +# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) +CRD_OPTIONS ?= "crd:trivialVersions=true" + +all: manager + +# Run tests +test: generate fmt vet manifests + go test ./api/... ./controllers/... -coverprofile cover.out + +# Build manager binary +manager: generate fmt vet + go build -o bin/manager main.go + +# Run against the configured Kubernetes cluster in ~/.kube/config +run: generate fmt vet + go run ./main.go + +# Install CRDs into a cluster +install: manifests + kubectl apply -f config/crd/bases + +# Deploy controller in the configured Kubernetes cluster in ~/.kube/config +deploy: manifests + kubectl apply -f config/crd/bases + kustomize build config/default | kubectl apply -f - + +# Generate manifests e.g. CRD, RBAC etc. +manifests: controller-gen + $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +# Run go fmt against code +fmt: + go fmt ./... + +# Run go vet against code +vet: + go vet ./... 
+ +# Generate code +generate: controller-gen + $(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths=./api/... + +# Build the docker image +docker-build: test + docker build . -t ${IMG} + @echo "updating kustomize image patch file for manager resource" + sed -i'' -e 's@image: .*@image: '"${IMG}"'@' ./config/default/manager_image_patch.yaml + +# Push the docker image +docker-push: + docker push ${IMG} + +# find or download controller-gen +# download controller-gen if necessary +controller-gen: +ifeq (, $(shell which controller-gen)) + go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.0-beta.2 +CONTROLLER_GEN=$(shell go env GOPATH)/bin/controller-gen +else +CONTROLLER_GEN=$(shell which controller-gen) +endif diff --git a/pkg/webhook/conversion/testdata/PROJECT b/pkg/webhook/conversion/testdata/PROJECT new file mode 100644 index 0000000000..6b168dcbc1 --- /dev/null +++ b/pkg/webhook/conversion/testdata/PROJECT @@ -0,0 +1,13 @@ +version: "2" +domain: testprojects.kb.io +repo: sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata +resources: +- group: jobs + version: v1 + kind: ExternalJob +- group: jobs + version: v2 + kind: ExternalJob +- group: jobs + version: v3 + kind: ExternalJob diff --git a/examples/conversion/pkg/apis/jobs/v1/externaljob_types.go b/pkg/webhook/conversion/testdata/api/v1/externaljob_types.go similarity index 85% rename from examples/conversion/pkg/apis/jobs/v1/externaljob_types.go rename to pkg/webhook/conversion/testdata/api/v1/externaljob_types.go index dcb137084d..c6065e1fb4 100644 --- a/examples/conversion/pkg/apis/jobs/v1/externaljob_types.go +++ b/pkg/webhook/conversion/testdata/api/v1/externaljob_types.go @@ -19,8 +19,9 @@ import ( "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/examples/conversion/pkg/apis/jobs/v2" "sigs.k8s.io/controller-runtime/pkg/conversion" + + v2 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v2" ) // EDIT THIS FILE! 
THIS IS SCAFFOLDING FOR YOU TO OWN! @@ -31,6 +32,9 @@ type ExternalJobSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file RunAt string `json:"runAt"` + + // PanicInConversion triggers a panic during conversion when set to true. + PanicInConversion bool `json:"panicInConversion"` } // ExternalJobStatus defines the observed state of ExternalJob @@ -39,11 +43,9 @@ type ExternalJobStatus struct { // Important: Run "make" to regenerate code after modifying this file } -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true // ExternalJob is the Schema for the externaljobs API -// +k8s:openapi-gen=true type ExternalJob struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -52,7 +54,7 @@ type ExternalJob struct { Status ExternalJobStatus `json:"status,omitempty"` } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true // ExternalJobList contains a list of ExternalJob type ExternalJobList struct { @@ -68,6 +70,9 @@ func init() { // ConvertTo implements conversion logic to convert to Hub type (v2.ExternalJob // in this case) func (ej *ExternalJob) ConvertTo(dst conversion.Hub) error { + if ej.Spec.PanicInConversion { + panic("PanicInConversion field set to true") + } switch t := dst.(type) { case *v2.ExternalJob: jobv2 := dst.(*v2.ExternalJob) @@ -82,6 +87,9 @@ func (ej *ExternalJob) ConvertTo(dst conversion.Hub) error { // ConvertFrom implements conversion logic to convert from Hub type (v2.ExternalJob // in this case) func (ej *ExternalJob) ConvertFrom(src conversion.Hub) error { + if ej.Spec.PanicInConversion { + panic("PanicInConversion field set to true") + } switch t := src.(type) { case *v2.ExternalJob: jobv2 := src.(*v2.ExternalJob) diff --git a/examples/conversion/pkg/apis/jobs/v1/register.go 
b/pkg/webhook/conversion/testdata/api/v1/groupversion_info.go similarity index 54% rename from examples/conversion/pkg/apis/jobs/v1/register.go rename to pkg/webhook/conversion/testdata/api/v1/groupversion_info.go index 8181537beb..5bbef61786 100644 --- a/examples/conversion/pkg/apis/jobs/v1/register.go +++ b/pkg/webhook/conversion/testdata/api/v1/groupversion_info.go @@ -13,14 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// NOTE: Boilerplate only. Ignore this file. - // Package v1 contains API Schema definitions for the jobs v1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/droot/crd-conversion-example/pkg/apis/jobs -// +k8s:defaulter-gen=TypeMeta -// +groupName=jobs.example.org +// +kubebuilder:object:generate=true +// +groupName=jobs.testprojects.kb.io package v1 import ( @@ -29,17 +24,12 @@ import ( ) var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "jobs.example.org", Version: "v1"} + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "jobs.testprojects.kb.io", Version: "v1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - // AddToScheme is required by pkg/client/... + // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme ) - -// Resource is required by pkg/client/listers/... 
-func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} diff --git a/examples/conversion/pkg/apis/jobs/v1/zz_generated.deepcopy.go b/pkg/webhook/conversion/testdata/api/v1/zz_generated.deepcopy.go similarity index 96% rename from examples/conversion/pkg/apis/jobs/v1/zz_generated.deepcopy.go rename to pkg/webhook/conversion/testdata/api/v1/zz_generated.deepcopy.go index dc2e649724..af7396abf1 100644 --- a/examples/conversion/pkg/apis/jobs/v1/zz_generated.deepcopy.go +++ b/pkg/webhook/conversion/testdata/api/v1/zz_generated.deepcopy.go @@ -1,4 +1,4 @@ -// +build !ignore_autogenerated +//go:build !ignore_autogenerated /* @@ -14,7 +14,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by main. DO NOT EDIT. + +// Code generated by controller-gen. DO NOT EDIT. package v1 @@ -29,7 +30,6 @@ func (in *ExternalJob) DeepCopyInto(out *ExternalJob) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec out.Status = in.Status - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJob. @@ -54,7 +54,7 @@ func (in *ExternalJob) DeepCopyObject() runtime.Object { func (in *ExternalJobList) DeepCopyInto(out *ExternalJobList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ExternalJob, len(*in)) @@ -62,7 +62,6 @@ func (in *ExternalJobList) DeepCopyInto(out *ExternalJobList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobList. 
@@ -86,7 +85,6 @@ func (in *ExternalJobList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalJobSpec) DeepCopyInto(out *ExternalJobSpec) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobSpec. @@ -102,7 +100,6 @@ func (in *ExternalJobSpec) DeepCopy() *ExternalJobSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalJobStatus) DeepCopyInto(out *ExternalJobStatus) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobStatus. diff --git a/examples/conversion/pkg/apis/jobs/v2/externaljob_types.go b/pkg/webhook/conversion/testdata/api/v2/externaljob_types.go similarity index 92% rename from examples/conversion/pkg/apis/jobs/v2/externaljob_types.go rename to pkg/webhook/conversion/testdata/api/v2/externaljob_types.go index c6e75ee0e8..1f87e8a017 100644 --- a/examples/conversion/pkg/apis/jobs/v2/externaljob_types.go +++ b/pkg/webhook/conversion/testdata/api/v2/externaljob_types.go @@ -27,6 +27,9 @@ type ExternalJobSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file ScheduleAt string `json:"scheduleAt"` + + // PanicInConversion triggers a panic during conversion when set to true. 
+ PanicInConversion bool `json:"panicInConversion"` } // ExternalJobStatus defines the observed state of ExternalJob @@ -35,11 +38,9 @@ type ExternalJobStatus struct { // Important: Run "make" to regenerate code after modifying this file } -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true // ExternalJob is the Schema for the externaljobs API -// +k8s:openapi-gen=true type ExternalJob struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -48,7 +49,7 @@ type ExternalJob struct { Status ExternalJobStatus `json:"status,omitempty"` } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true // ExternalJobList contains a list of ExternalJob type ExternalJobList struct { diff --git a/examples/conversion/pkg/apis/jobs/v2/register.go b/pkg/webhook/conversion/testdata/api/v2/groupversion_info.go similarity index 54% rename from examples/conversion/pkg/apis/jobs/v2/register.go rename to pkg/webhook/conversion/testdata/api/v2/groupversion_info.go index 70e26e15b0..5019111a00 100644 --- a/examples/conversion/pkg/apis/jobs/v2/register.go +++ b/pkg/webhook/conversion/testdata/api/v2/groupversion_info.go @@ -13,14 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// NOTE: Boilerplate only. Ignore this file. 
- // Package v2 contains API Schema definitions for the jobs v2 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/droot/crd-conversion-example/pkg/apis/jobs -// +k8s:defaulter-gen=TypeMeta -// +groupName=jobs.example.org +// +kubebuilder:object:generate=true +// +groupName=jobs.testprojects.kb.io package v2 import ( @@ -29,17 +24,12 @@ import ( ) var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "jobs.example.org", Version: "v2"} + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "jobs.testprojects.kb.io", Version: "v2"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - // AddToScheme is required by pkg/client/... + // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme ) - -// Resource is required by pkg/client/listers/... -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} diff --git a/examples/conversion/pkg/apis/jobs/v2/zz_generated.deepcopy.go b/pkg/webhook/conversion/testdata/api/v2/zz_generated.deepcopy.go similarity index 96% rename from examples/conversion/pkg/apis/jobs/v2/zz_generated.deepcopy.go rename to pkg/webhook/conversion/testdata/api/v2/zz_generated.deepcopy.go index cffbbf5577..d5efd6150e 100644 --- a/examples/conversion/pkg/apis/jobs/v2/zz_generated.deepcopy.go +++ b/pkg/webhook/conversion/testdata/api/v2/zz_generated.deepcopy.go @@ -1,4 +1,4 @@ -// +build !ignore_autogenerated +//go:build !ignore_autogenerated /* @@ -14,7 +14,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by main. DO NOT EDIT. + +// Code generated by controller-gen. DO NOT EDIT. package v2 @@ -29,7 +30,6 @@ func (in *ExternalJob) DeepCopyInto(out *ExternalJob) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec out.Status = in.Status - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJob. @@ -54,7 +54,7 @@ func (in *ExternalJob) DeepCopyObject() runtime.Object { func (in *ExternalJobList) DeepCopyInto(out *ExternalJobList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ExternalJob, len(*in)) @@ -62,7 +62,6 @@ func (in *ExternalJobList) DeepCopyInto(out *ExternalJobList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobList. @@ -86,7 +85,6 @@ func (in *ExternalJobList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalJobSpec) DeepCopyInto(out *ExternalJobSpec) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobSpec. @@ -102,7 +100,6 @@ func (in *ExternalJobSpec) DeepCopy() *ExternalJobSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalJobStatus) DeepCopyInto(out *ExternalJobStatus) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobStatus. 
diff --git a/pkg/webhook/conversion/testdata/api/v3/externaljob_types.go b/pkg/webhook/conversion/testdata/api/v3/externaljob_types.go new file mode 100644 index 0000000000..85a166b7cf --- /dev/null +++ b/pkg/webhook/conversion/testdata/api/v3/externaljob_types.go @@ -0,0 +1,102 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v3 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/conversion" + + v2 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v2" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// ExternalJobSpec defines the desired state of ExternalJob +type ExternalJobSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + DeferredAt string `json:"deferredAt"` + + // PanicInConversion triggers a panic during conversion when set to true. 
+ PanicInConversion bool `json:"panicInConversion"` +} + +// ExternalJobStatus defines the observed state of ExternalJob +type ExternalJobStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file +} + +// +kubebuilder:object:root=true + +// ExternalJob is the Schema for the externaljobs API +type ExternalJob struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ExternalJobSpec `json:"spec,omitempty"` + Status ExternalJobStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExternalJobList contains a list of ExternalJob +type ExternalJobList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExternalJob `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ExternalJob{}, &ExternalJobList{}) +} + +// ConvertTo implements conversion logic to convert to Hub type (v2.ExternalJob +// in this case) +func (ej *ExternalJob) ConvertTo(dst conversion.Hub) error { + if ej.Spec.PanicInConversion { + panic("PanicInConversion field set to true") + } + switch t := dst.(type) { + case *v2.ExternalJob: + jobv2 := dst.(*v2.ExternalJob) + jobv2.ObjectMeta = ej.ObjectMeta + jobv2.Spec.ScheduleAt = ej.Spec.DeferredAt + return nil + default: + return fmt.Errorf("unsupported type %v", t) + } +} + +// ConvertFrom implements conversion logic to convert from Hub type (v2.ExternalJob +// in this case) +func (ej *ExternalJob) ConvertFrom(src conversion.Hub) error { + if ej.Spec.PanicInConversion { + panic("PanicInConversion field set to true") + } + switch t := src.(type) { + case *v2.ExternalJob: + jobv2 := src.(*v2.ExternalJob) + ej.ObjectMeta = jobv2.ObjectMeta + ej.Spec.DeferredAt = jobv2.Spec.ScheduleAt + return nil + default: + return fmt.Errorf("unsupported type %v", t) + } +} diff --git a/pkg/webhook/conversion/testdata/api/v3/groupversion_info.go 
b/pkg/webhook/conversion/testdata/api/v3/groupversion_info.go new file mode 100644 index 0000000000..1ae8269614 --- /dev/null +++ b/pkg/webhook/conversion/testdata/api/v3/groupversion_info.go @@ -0,0 +1,35 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v3 contains API Schema definitions for the jobs v3 API group +// +kubebuilder:object:generate=true +// +groupName=jobs.testprojects.kb.io +package v3 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "jobs.testprojects.kb.io", Version: "v3"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/pkg/webhook/conversion/testdata/api/v3/zz_generated.deepcopy.go b/pkg/webhook/conversion/testdata/api/v3/zz_generated.deepcopy.go new file mode 100644 index 0000000000..d12b6910dc --- /dev/null +++ b/pkg/webhook/conversion/testdata/api/v3/zz_generated.deepcopy.go @@ -0,0 +1,113 @@ +//go:build !ignore_autogenerated + +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v3 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalJob) DeepCopyInto(out *ExternalJob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJob. +func (in *ExternalJob) DeepCopy() *ExternalJob { + if in == nil { + return nil + } + out := new(ExternalJob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalJob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalJobList) DeepCopyInto(out *ExternalJobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ExternalJob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobList. 
+func (in *ExternalJobList) DeepCopy() *ExternalJobList { + if in == nil { + return nil + } + out := new(ExternalJobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalJobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalJobSpec) DeepCopyInto(out *ExternalJobSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobSpec. +func (in *ExternalJobSpec) DeepCopy() *ExternalJobSpec { + if in == nil { + return nil + } + out := new(ExternalJobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalJobStatus) DeepCopyInto(out *ExternalJobStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalJobStatus. +func (in *ExternalJobStatus) DeepCopy() *ExternalJobStatus { + if in == nil { + return nil + } + out := new(ExternalJobStatus) + in.DeepCopyInto(out) + return out +} diff --git a/examples/conversion/pkg/apis/jobs/group.go b/pkg/webhook/conversion/testdata/hack/boilerplate.go.txt similarity index 89% rename from examples/conversion/pkg/apis/jobs/group.go rename to pkg/webhook/conversion/testdata/hack/boilerplate.go.txt index 02b401aeb6..b92001fb4e 100644 --- a/examples/conversion/pkg/apis/jobs/group.go +++ b/pkg/webhook/conversion/testdata/hack/boilerplate.go.txt @@ -11,7 +11,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/ - -// Package jobs contains jobs API versions -package jobs +*/ \ No newline at end of file diff --git a/pkg/webhook/conversion/testdata/main.go b/pkg/webhook/conversion/testdata/main.go new file mode 100644 index 0000000000..ea6b8275a8 --- /dev/null +++ b/pkg/webhook/conversion/testdata/main.go @@ -0,0 +1,75 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "flag" + "os" + + "k8s.io/apimachinery/pkg/runtime" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + jobsv1 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v1" + jobsv2 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v2" + jobsv3 "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/testdata/api/v3" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + + jobsv1.AddToScheme(scheme) + jobsv2.AddToScheme(scheme) + jobsv3.AddToScheme(scheme) + // +kubebuilder:scaffold:scheme +} + +func main() { + var metricsAddr string + var enableLeaderElection bool + flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") + flag.BoolVar(&enableLeaderElection, 
"enable-leader-election", false, + "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") + flag.Parse() + + ctrl.SetLogger(zap.Logger(true)) + + mgr, err := ctrl.NewManager(context.Background(), ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + Metrics: metricsserver.Options{BindAddress: metricsAddr}, + LeaderElection: enableLeaderElection, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + // +kubebuilder:scaffold:builder + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} diff --git a/pkg/webhook/example_test.go b/pkg/webhook/example_test.go index 97d121488b..7c4f718f4c 100644 --- a/pkg/webhook/example_test.go +++ b/pkg/webhook/example_test.go @@ -18,22 +18,23 @@ package webhook_test import ( "context" + "net/http" ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" . "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) var ( - mgr ctrl.Manager -) - -func Example() { - // Build webhooks + // Build webhooks used for the various server + // configuration options + // // These handlers could be also be implementations // of the AdmissionHandler interface for more complex // implementations. 
- mutatingHook := &Admission{ + mutatingHook = &Admission{ Handler: admission.HandlerFunc(func(ctx context.Context, req AdmissionRequest) AdmissionResponse { return Patched("some changes", JSONPatchOp{Operation: "add", Path: "/metadata/annotations/access", Value: "granted"}, @@ -42,16 +43,27 @@ func Example() { }), } - validatingHook := &Admission{ + validatingHook = &Admission{ Handler: admission.HandlerFunc(func(ctx context.Context, req AdmissionRequest) AdmissionResponse { return Denied("none shall pass!") }), } +) + +// This example registers webhooks to a webhook server +// that gets run by a controller manager. +func Example() { + // Create a manager + // Note: GetConfigOrDie will os.Exit(1) w/o any message if no kube-config can be found + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{}) + if err != nil { + panic(err) + } // Create a webhook server. - hookServer := &Server{ + hookServer := NewServer(Options{ Port: 8443, - } + }) if err := mgr.Add(hookServer); err != nil { panic(err) } @@ -61,9 +73,79 @@ func Example() { hookServer.Register("/mutating", mutatingHook) hookServer.Register("/validating", validatingHook) // Start the server by starting a previously-set-up manager - err := mgr.Start(ctrl.SetupSignalHandler()) + err = mgr.Start(ctrl.SetupSignalHandler()) + if err != nil { + // handle error + panic(err) + } +} + +// This example creates a webhook server that can be +// run without a controller manager. +// +// Note that this assumes and requires a valid TLS +// cert and key at the default locations +// tls.crt and tls.key. +func ExampleServer_Start() { + // Create a webhook server + hookServer := NewServer(Options{ + Port: 8443, + }) + + // Register the webhooks in the server.
+ hookServer.Register("/mutating", mutatingHook) + hookServer.Register("/validating", validatingHook) + + // Start the server without a manager + err := hookServer.Start(signals.SetupSignalHandler()) + if err != nil { + // handle error + panic(err) + } +} + +// This example creates a standalone webhook handler +// and runs it on a vanilla go HTTP server to demonstrate +// how you could run a webhook on an existing server +// without a controller manager. +func ExampleStandaloneWebhook() { + // Assume you have an existing HTTP server at your disposal + // configured as desired (e.g. with TLS). + // For this example just create a basic http.ServeMux + mux := http.NewServeMux() + port := ":8000" + + // Create the standalone HTTP handlers from our webhooks + mutatingHookHandler, err := admission.StandaloneWebhook(mutatingHook, admission.StandaloneOptions{ + // Logger lets you optionally pass + // a custom logger (defaults to log.Log global Logger) + Logger: logf.RuntimeLog.WithName("mutating-webhook"), + // MetricsPath lets you optionally + // provide the path it will be served on + // to be used for labelling prometheus metrics + // If none is set, prometheus metrics will not be generated.
+ MetricsPath: "/mutating", + }) if err != nil { // handle error panic(err) } + + validatingHookHandler, err := admission.StandaloneWebhook(validatingHook, admission.StandaloneOptions{ + Logger: logf.RuntimeLog.WithName("validating-webhook"), + MetricsPath: "/validating", + }) + if err != nil { + // handle error + panic(err) + } + + // Register the webhook handlers to your server + mux.Handle("/mutating", mutatingHookHandler) + mux.Handle("/validating", validatingHookHandler) + + // Run your handler + if err := http.ListenAndServe(port, mux); err != nil { + panic(err) + } } diff --git a/pkg/webhook/internal/certwatcher/certwatcher.go b/pkg/webhook/internal/certwatcher/certwatcher.go deleted file mode 100644 index 4a20319b50..0000000000 --- a/pkg/webhook/internal/certwatcher/certwatcher.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package certwatcher - -import ( - "crypto/tls" - "sync" - - "gopkg.in/fsnotify.v1" - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" -) - -var log = logf.RuntimeLog.WithName("certwatcher") - -// CertWatcher watches certificate and key files for changes. When either file -// changes, it reads and parses both and calls an optional callback with the new -// certificate. 
-type CertWatcher struct { - sync.Mutex - - currentCert *tls.Certificate - watcher *fsnotify.Watcher - - certPath string - keyPath string -} - -// New returns a new CertWatcher watching the given certificate and key. -func New(certPath, keyPath string) (*CertWatcher, error) { - var err error - - cw := &CertWatcher{ - certPath: certPath, - keyPath: keyPath, - } - - // Initial read of certificate and key. - if err := cw.ReadCertificate(); err != nil { - return nil, err - } - - cw.watcher, err = fsnotify.NewWatcher() - if err != nil { - return nil, err - } - - return cw, nil -} - -// GetCertificate fetches the currently loaded certificate, which may be nil. -func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { - cw.Lock() - defer cw.Unlock() - return cw.currentCert, nil -} - -// Start starts the watch on the certificate and key files. -func (cw *CertWatcher) Start(stopCh <-chan struct{}) error { - files := []string{cw.certPath, cw.keyPath} - - for _, f := range files { - if err := cw.watcher.Add(f); err != nil { - return err - } - } - - go cw.Watch() - - log.Info("Starting certificate watcher") - - // Block until the stop channel is closed. - <-stopCh - - return cw.watcher.Close() -} - -// Watch reads events from the watcher's channel and reacts to changes. -func (cw *CertWatcher) Watch() { - for { - select { - case event, ok := <-cw.watcher.Events: - // Channel is closed. - if !ok { - return - } - - cw.handleEvent(event) - - case err, ok := <-cw.watcher.Errors: - // Channel is closed. - if !ok { - return - } - - log.Error(err, "certificate watch error") - } - } -} - -// ReadCertificate reads the certificate and key files from disk, parses them, -// and updates the current certificate on the watcher. If a callback is set, it -// is invoked with the new certificate. 
-func (cw *CertWatcher) ReadCertificate() error { - cert, err := tls.LoadX509KeyPair(cw.certPath, cw.keyPath) - if err != nil { - return err - } - - cw.Lock() - cw.currentCert = &cert - cw.Unlock() - - log.Info("Updated current TLS certiface") - - return nil -} - -func (cw *CertWatcher) handleEvent(event fsnotify.Event) { - // Only care about events which may modify the contents of the file. - if !(isWrite(event) || isRemove(event) || isCreate(event)) { - return - } - - log.V(1).Info("certificate event", "event", event) - - // If the file was removed, re-add the watch. - if isRemove(event) { - if err := cw.watcher.Add(event.Name); err != nil { - log.Error(err, "error re-watching file") - } - } - - if err := cw.ReadCertificate(); err != nil { - log.Error(err, "error re-reading certificate") - } -} - -func isWrite(event fsnotify.Event) bool { - return event.Op&fsnotify.Write == fsnotify.Write -} - -func isCreate(event fsnotify.Event) bool { - return event.Op&fsnotify.Create == fsnotify.Create -} - -func isRemove(event fsnotify.Event) bool { - return event.Op&fsnotify.Remove == fsnotify.Remove -} diff --git a/pkg/webhook/internal/metrics/metrics.go b/pkg/webhook/internal/metrics/metrics.go index ff82c10d8d..f1e6ce68f5 100644 --- a/pkg/webhook/internal/metrics/metrics.go +++ b/pkg/webhook/internal/metrics/metrics.go @@ -17,35 +17,73 @@ limitations under the License. package metrics import ( + "net/http" + "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" "sigs.k8s.io/controller-runtime/pkg/metrics" ) var ( - // TotalRequests is a prometheus metric which counts the total number of requests that - // the webhook server has received. 
- TotalRequests = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "controller_runtime_webhook_requests_total", - Help: "Total number of admission requests", - }, - []string{"webhook", "succeeded"}, - ) - // RequestLatency is a prometheus metric which is a histogram of the latency // of processing admission requests. RequestLatency = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - Name: "controller_runtime_webhook_latency_seconds", - Help: "Histogram of the latency of processing admission requests", + Name: "controller_runtime_webhook_latency_seconds", + Help: "Histogram of the latency of processing admission requests", + NativeHistogramBucketFactor: 1.1, + NativeHistogramMaxBucketNumber: 100, + NativeHistogramMinResetDuration: 1 * time.Hour, }, []string{"webhook"}, ) + + // RequestTotal is a prometheus metric which is a counter of the total processed admission requests. + RequestTotal = func() *prometheus.CounterVec { + return prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "controller_runtime_webhook_requests_total", + Help: "Total number of admission requests by HTTP status code.", + }, + []string{"webhook", "code"}, + ) + }() + + // RequestInFlight is a prometheus metric which is a gauge of the in-flight admission requests. + RequestInFlight = func() *prometheus.GaugeVec { + return prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "controller_runtime_webhook_requests_in_flight", + Help: "Current number of admission requests being served.", + }, + []string{"webhook"}, + ) + }() ) func init() { - metrics.Registry.MustRegister( - TotalRequests, - RequestLatency) + metrics.Registry.MustRegister(RequestLatency, RequestTotal, RequestInFlight) +} + +// InstrumentedHook adds some instrumentation on top of the given webhook. 
+func InstrumentedHook(path string, hookRaw http.Handler) http.Handler { + lbl := prometheus.Labels{"webhook": path} + + lat := RequestLatency.MustCurryWith(lbl) + cnt := RequestTotal.MustCurryWith(lbl) + gge := RequestInFlight.With(lbl) + + // Initialize the most likely HTTP status codes. + cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + + return promhttp.InstrumentHandlerDuration( + lat, + promhttp.InstrumentHandlerCounter( + cnt, + promhttp.InstrumentHandlerInFlight(gge, hookRaw), + ), + ) } diff --git a/pkg/webhook/server.go b/pkg/webhook/server.go index 68a8e7dd65..4d8ae9ec7a 100644 --- a/pkg/webhook/server.go +++ b/pkg/webhook/server.go @@ -19,169 +19,248 @@ package webhook import ( "context" "crypto/tls" + "crypto/x509" "fmt" "net" "net/http" - "path" + "os" "path/filepath" "strconv" "sync" "time" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" - "sigs.k8s.io/controller-runtime/pkg/webhook/internal/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" ) -const ( - certName = "tls.crt" - keyName = "tls.key" -) - // DefaultPort is the default port that the webhook server serves. -var DefaultPort = 443 +var DefaultPort = 9443 // Server is an admission webhook server that can serve traffic and // generates related k8s resources for deploying. -type Server struct { +// +// TLS is required for a webhook to be accessed by kubernetes, so +// you must provide a CertName and KeyName or have valid cert/key +// at the default locations (tls.crt and tls.key). If you do not +// want to configure TLS (i.e for testing purposes) run an +// admission.StandaloneWebhook in your own server. +type Server interface { + // NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates + // the webhook server doesn't need leader election. 
+ NeedLeaderElection() bool + + // Register marks the given webhook as being served at the given path. + // It panics if two hooks are registered on the same path. + Register(path string, hook http.Handler) + + // Start runs the server. + // It will install the webhook related resources depending on the server configuration. + Start(ctx context.Context) error + + // StartedChecker returns a healthz.Checker which is healthy after the + // server has been started. + StartedChecker() healthz.Checker + + // WebhookMux returns the server's WebhookMux + WebhookMux() *http.ServeMux +} + +// Options are all the available options for a webhook.Server +type Options struct { // Host is the address that the server will listen on. // Defaults to "" - all addresses. Host string // Port is the port number that the server will serve. - // It will be defaulted to 443 if unspecified. + // It will be defaulted to 9443 if unspecified. Port int - // CertDir is the directory that contains the server key and certificate. - // If using FSCertWriter in Provisioner, the server itself will provision the certificate and - // store it in this directory. - // If using SecretCertWriter in Provisioner, the server will provision the certificate in a secret, - // the user is responsible to mount the secret to the this location for the server to consume. + // CertDir is the directory that contains the server key and certificate. Defaults to + // /k8s-webhook-server/serving-certs. CertDir string + // CertName is the server certificate name. Defaults to tls.crt. + // + // Note: This option is only used when TLSOpts does not set GetCertificate. + CertName string + + // KeyName is the server key name. Defaults to tls.key. + // + // Note: This option is only used when TLSOpts does not set GetCertificate. + KeyName string + + // ClientCAName is the CA certificate name which the server uses to verify the remote (client) certificate. + // Defaults to "", which means server does not verify client's certificate.
+ ClientCAName string + + // TLSOpts is used to allow configuring the TLS config used for the server. + // This also allows providing a certificate via GetCertificate. + TLSOpts []func(*tls.Config) + // WebhookMux is the multiplexer that handles different webhooks. WebhookMux *http.ServeMux +} - // webhooks keep track of all registered webhooks for dependency injection, - // and to provide better panic messages on duplicate webhook registration. - webhooks map[string]http.Handler +// NewServer constructs a new webhook.Server from the provided options. +func NewServer(o Options) Server { + return &DefaultServer{ + Options: o, + } +} - // setFields allows injecting dependencies from an external source - setFields inject.Func +// DefaultServer is the default implementation used for Server. +type DefaultServer struct { + Options Options + + // webhooks keep track of all registered webhooks + webhooks map[string]http.Handler // defaultingOnce ensures that the default fields are only ever set once. defaultingOnce sync.Once + + // started is set to true immediately before the server is started + // and thus can be used to check if the server has been started + started bool + + // mu protects access to the webhook map & setFields for Start, Register, etc + mu sync.Mutex + + webhookMux *http.ServeMux } // setDefaults does defaulting for the Server. 
-func (s *Server) setDefaults() { - s.webhooks = map[string]http.Handler{} - if s.WebhookMux == nil { - s.WebhookMux = http.NewServeMux() +func (o *Options) setDefaults() { + if o.WebhookMux == nil { + o.WebhookMux = http.NewServeMux() + } + + if o.Port <= 0 { + o.Port = DefaultPort + } + + if len(o.CertDir) == 0 { + o.CertDir = filepath.Join(os.TempDir(), "k8s-webhook-server", "serving-certs") } - if s.Port <= 0 { - s.Port = DefaultPort + if len(o.CertName) == 0 { + o.CertName = "tls.crt" } - if len(s.CertDir) == 0 { - s.CertDir = path.Join("/tmp", "k8s-webhook-server", "serving-certs") + if len(o.KeyName) == 0 { + o.KeyName = "tls.key" } } +func (s *DefaultServer) setDefaults() { + s.webhooks = map[string]http.Handler{} + s.Options.setDefaults() + + s.webhookMux = s.Options.WebhookMux +} + // NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates // the webhook server doesn't need leader election. -func (*Server) NeedLeaderElection() bool { +func (*DefaultServer) NeedLeaderElection() bool { return false } // Register marks the given webhook as being served at the given path. // It panics if two hooks are registered on the same path. -func (s *Server) Register(path string, hook http.Handler) { +func (s *DefaultServer) Register(path string, hook http.Handler) { + s.mu.Lock() + defer s.mu.Unlock() + s.defaultingOnce.Do(s.setDefaults) - _, found := s.webhooks[path] - if found { + if _, found := s.webhooks[path]; found { panic(fmt.Errorf("can't register duplicate path: %v", path)) } - // TODO(directxman12): call setfields if we've already started the server s.webhooks[path] = hook - s.WebhookMux.Handle(path, instrumentedHook(path, hook)) -} - -// instrumentedHook adds some instrumentation on top of the given webhook. 
-func instrumentedHook(path string, hookRaw http.Handler) http.Handler { - return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { - startTS := time.Now() - defer func() { metrics.RequestLatency.WithLabelValues(path).Observe(time.Now().Sub(startTS).Seconds()) }() - hookRaw.ServeHTTP(resp, req) + s.webhookMux.Handle(path, metrics.InstrumentedHook(path, hook)) - // TODO(directxman12): add back in metric about total requests broken down by result? - }) + regLog := log.WithValues("path", path) + regLog.Info("Registering webhook") } // Start runs the server. // It will install the webhook related resources depend on the server configuration. -func (s *Server) Start(stop <-chan struct{}) error { +func (s *DefaultServer) Start(ctx context.Context) error { s.defaultingOnce.Do(s.setDefaults) - baseHookLog := log.WithName("webhooks") - // inject fields here as opposed to in Register so that we're certain to have our setFields - // function available. - for hookPath, webhook := range s.webhooks { - if err := s.setFields(webhook); err != nil { - return err - } + log.Info("Starting webhook server") - // NB(directxman12): we don't propagate this further by wrapping setFields because it's - // unclear if this is how we want to deal with log propagation. In this specific instance, - // we want to be able to pass a logger to webhooks because they don't know their own path. 
- if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", hookPath), webhook); err != nil { - return err - } + cfg := &tls.Config{ + NextProtos: []string{"h2"}, + } + // fallback TLS config ready, will now mutate if passer wants full control over it + for _, op := range s.Options.TLSOpts { + op(cfg) } - certPath := filepath.Join(s.CertDir, certName) - keyPath := filepath.Join(s.CertDir, keyName) + if cfg.GetCertificate == nil { + certPath := filepath.Join(s.Options.CertDir, s.Options.CertName) + keyPath := filepath.Join(s.Options.CertDir, s.Options.KeyName) - certWatcher, err := certwatcher.New(certPath, keyPath) - if err != nil { - return err + // Create the certificate watcher and + // set the config's GetCertificate on the TLSConfig + certWatcher, err := certwatcher.New(certPath, keyPath) + if err != nil { + return err + } + cfg.GetCertificate = certWatcher.GetCertificate + + go func() { + if err := certWatcher.Start(ctx); err != nil { + log.Error(err, "certificate watcher error") + } + }() } - go func() { - if err := certWatcher.Start(stop); err != nil { - log.Error(err, "certificate watcher error") + // Load CA to verify client certificate, if configured. 
+ if s.Options.ClientCAName != "" { + certPool := x509.NewCertPool() + clientCABytes, err := os.ReadFile(filepath.Join(s.Options.CertDir, s.Options.ClientCAName)) + if err != nil { + return fmt.Errorf("failed to read client CA cert: %w", err) } - }() - cfg := &tls.Config{ - NextProtos: []string{"h2"}, - GetCertificate: certWatcher.GetCertificate, + ok := certPool.AppendCertsFromPEM(clientCABytes) + if !ok { + return fmt.Errorf("failed to append client CA cert to CA pool") + } + + cfg.ClientCAs = certPool + cfg.ClientAuth = tls.RequireAndVerifyClientCert } - listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(int(s.Port))), cfg) + listener, err := tls.Listen("tcp", net.JoinHostPort(s.Options.Host, strconv.Itoa(s.Options.Port)), cfg) if err != nil { return err } - srv := &http.Server{ - Handler: s.WebhookMux, - } + log.Info("Serving webhook server", "host", s.Options.Host, "port", s.Options.Port) + + srv := httpserver.New(s.webhookMux) idleConnsClosed := make(chan struct{}) go func() { - <-stop + <-ctx.Done() + log.Info("Shutting down webhook server with timeout of 1 minute") - // TODO: use a context with reasonable timeout - if err := srv.Shutdown(context.Background()); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + if err := srv.Shutdown(ctx); err != nil { // Error from closing listeners, or context timeout log.Error(err, "error shutting down the HTTP server") } close(idleConnsClosed) }() - err = srv.Serve(listener) - if err != nil && err != http.ErrServerClosed { + s.mu.Lock() + s.started = true + s.mu.Unlock() + if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed { return err } @@ -189,8 +268,35 @@ func (s *Server) Start(stop <-chan struct{}) error { return nil } -// InjectFunc injects the field setter into the server. 
-func (s *Server) InjectFunc(f inject.Func) error { - s.setFields = f - return nil +// StartedChecker returns an healthz.Checker which is healthy after the +// server has been started. +func (s *DefaultServer) StartedChecker() healthz.Checker { + config := &tls.Config{ + InsecureSkipVerify: true, + } + return func(req *http.Request) error { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.started { + return fmt.Errorf("webhook server has not been started yet") + } + + d := &net.Dialer{Timeout: 10 * time.Second} + conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Options.Host, strconv.Itoa(s.Options.Port)), config) + if err != nil { + return fmt.Errorf("webhook server is not reachable: %w", err) + } + + if err := conn.Close(); err != nil { + return fmt.Errorf("webhook server is not reachable: closing connection: %w", err) + } + + return nil + } +} + +// WebhookMux returns the servers WebhookMux +func (s *DefaultServer) WebhookMux() *http.ServeMux { + return s.webhookMux } diff --git a/pkg/webhook/server_test.go b/pkg/webhook/server_test.go new file mode 100644 index 0000000000..6542222585 --- /dev/null +++ b/pkg/webhook/server_test.go @@ -0,0 +1,264 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook_test + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "path" + "reflect" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +var _ = Describe("Webhook Server", func() { + var ( + ctxCancel context.CancelFunc + testHostPort string + client *http.Client + server webhook.Server + servingOpts envtest.WebhookInstallOptions + genericStartServer func(f func(ctx context.Context)) (done <-chan struct{}) + ) + + BeforeEach(func() { + var ctx context.Context + // Has to be derived from context.Background() as it needs to be + // valid past the BeforeEach + ctx, ctxCancel = context.WithCancel(context.Background()) //nolint:forbidigo + + servingOpts = envtest.WebhookInstallOptions{} + Expect(servingOpts.PrepWithoutInstalling()).To(Succeed()) + + testHostPort = net.JoinHostPort(servingOpts.LocalServingHost, fmt.Sprintf("%d", servingOpts.LocalServingPort)) + + // bypass needing to set up the x509 cert pool, etc ourselves + clientTransport, err := rest.TransportFor(&rest.Config{ + TLSClientConfig: rest.TLSClientConfig{CAData: servingOpts.LocalServingCAData}, + }) + Expect(err).NotTo(HaveOccurred()) + client = &http.Client{ + Transport: clientTransport, + } + + server = webhook.NewServer(webhook.Options{ + Host: servingOpts.LocalServingHost, + Port: servingOpts.LocalServingPort, + CertDir: servingOpts.LocalServingCertDir, + }) + + genericStartServer = func(f func(ctx context.Context)) (done <-chan struct{}) { + doneCh := make(chan struct{}) + go func() { + defer GinkgoRecover() + defer close(doneCh) + f(ctx) + }() + // wait till we can ping the server to start the test + Eventually(func() error { + _, err := client.Get(fmt.Sprintf("https://%s/unservedpath", testHostPort)) + return err + }).Should(Succeed()) + + return doneCh + } + }) + AfterEach(func() { + Expect(servingOpts.Cleanup()).To(Succeed()) + }) + + startServer := func() (done <-chan struct{}) { + return genericStartServer(func(ctx context.Context) { + 
Expect(server.Start(ctx)).To(Succeed()) + }) + } + + // TODO(directxman12): figure out a good way to test all the serving setup + // with httptest.Server to get all the niceness from that. + + Context("when serving", func() { + PIt("should verify the client CA name when asked to", func() { + + }) + PIt("should support HTTP/2", func() { + + }) + + // TODO(directxman12): figure out a good way to test the port default, etc + }) + + It("should panic if a duplicate path is registered", func() { + server.Register("/somepath", &testHandler{}) + doneCh := startServer() + + Expect(func() { server.Register("/somepath", &testHandler{}) }).To(Panic()) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + + Context("when registering new webhooks before starting", func() { + It("should serve a webhook on the requested path", func() { + server.Register("/somepath", &testHandler{}) + + Expect(server.StartedChecker()(nil)).ToNot(Succeed()) + + doneCh := startServer() + + Eventually(func() ([]byte, error) { + resp, err := client.Get(fmt.Sprintf("https://%s/somepath", testHostPort)) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + return io.ReadAll(resp.Body) + }).Should(Equal([]byte("gadzooks!"))) + + Expect(server.StartedChecker()(nil)).To(Succeed()) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + }) + + Context("when registering webhooks after starting", func() { + var ( + doneCh <-chan struct{} + ) + BeforeEach(func() { + doneCh = startServer() + }) + AfterEach(func() { + // wait for cleanup to happen + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + + It("should serve a webhook on the requested path", func() { + server.Register("/somepath", &testHandler{}) + resp, err := client.Get(fmt.Sprintf("https://%s/somepath", testHostPort)) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + + Expect(io.ReadAll(resp.Body)).To(Equal([]byte("gadzooks!"))) + }) + }) + + It("should respect passed in TLS 
configurations", func() { + var finalCfg *tls.Config + tlsCfgFunc := func(cfg *tls.Config) { + cfg.CipherSuites = []uint16{ + tls.TLS_AES_128_GCM_SHA256, + tls.TLS_AES_256_GCM_SHA384, + } + cfg.MinVersion = tls.VersionTLS12 + // save cfg after changes to test against + finalCfg = cfg + } + server = webhook.NewServer(webhook.Options{ + Host: servingOpts.LocalServingHost, + Port: servingOpts.LocalServingPort, + CertDir: servingOpts.LocalServingCertDir, + TLSOpts: []func(*tls.Config){ + tlsCfgFunc, + }, + }) + server.Register("/somepath", &testHandler{}) + doneCh := genericStartServer(func(ctx context.Context) { + Expect(server.Start(ctx)).To(Succeed()) + }) + + Eventually(func() ([]byte, error) { + resp, err := client.Get(fmt.Sprintf("https://%s/somepath", testHostPort)) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + return io.ReadAll(resp.Body) + }).Should(Equal([]byte("gadzooks!"))) + Expect(finalCfg.MinVersion).To(Equal(uint16(tls.VersionTLS12))) + Expect(finalCfg.CipherSuites).To(ContainElements( + tls.TLS_AES_128_GCM_SHA256, + tls.TLS_AES_256_GCM_SHA384, + )) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) + + It("should prefer GetCertificate through TLSOpts", func() { + var finalCfg *tls.Config + finalCert, err := tls.LoadX509KeyPair( + path.Join(servingOpts.LocalServingCertDir, "tls.crt"), + path.Join(servingOpts.LocalServingCertDir, "tls.key"), + ) + Expect(err).NotTo(HaveOccurred()) + finalGetCertificate := func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { //nolint:unparam + return &finalCert, nil + } + server = &webhook.DefaultServer{Options: webhook.Options{ + Host: servingOpts.LocalServingHost, + Port: servingOpts.LocalServingPort, + CertDir: servingOpts.LocalServingCertDir, + + TLSOpts: []func(*tls.Config){ + func(cfg *tls.Config) { + cfg.GetCertificate = finalGetCertificate + cfg.MinVersion = tls.VersionTLS12 + // save cfg after changes to test against + finalCfg = cfg + }, + }, + }} + 
server.Register("/somepath", &testHandler{}) + doneCh := genericStartServer(func(ctx context.Context) { + Expect(server.Start(ctx)).To(Succeed()) + }) + + Eventually(func() ([]byte, error) { + resp, err := client.Get(fmt.Sprintf("https://%s/somepath", testHostPort)) + Expect(err).NotTo(HaveOccurred()) + defer resp.Body.Close() + return io.ReadAll(resp.Body) + }).Should(Equal([]byte("gadzooks!"))) + Expect(finalCfg.MinVersion).To(Equal(uint16(tls.VersionTLS12))) + // We can't compare the functions directly, but we can compare their pointers + if reflect.ValueOf(finalCfg.GetCertificate).Pointer() != reflect.ValueOf(finalGetCertificate).Pointer() { + Fail("GetCertificate was not set properly, or overwritten") + } + cert, err := finalCfg.GetCertificate(nil) + Expect(err).NotTo(HaveOccurred()) + Expect(cert).To(BeEquivalentTo(&finalCert)) + + ctxCancel() + Eventually(doneCh, "4s").Should(BeClosed()) + }) +}) + +type testHandler struct { +} + +func (t *testHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + if _, err := resp.Write([]byte("gadzooks!")); err != nil { + panic("unable to write http response!") + } +} diff --git a/pkg/webhook/webhook_integration_test.go b/pkg/webhook/webhook_integration_test.go new file mode 100644 index 0000000000..cbb5b711f7 --- /dev/null +++ b/pkg/webhook/webhook_integration_test.go @@ -0,0 +1,157 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package webhook_test + +import ( + "context" + "crypto/tls" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +var _ = Describe("Webhook", func() { + var c client.Client + var obj *appsv1.Deployment + BeforeEach(func() { + Expect(cfg).NotTo(BeNil()) + var err error + c, err = client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + obj = &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "default", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + }, + }, + } + }) + Context("when running a webhook server with a manager", func() { + It("should reject create request for webhook that rejects all requests", func(ctx SpecContext) { + m, err := manager.New(cfg, manager.Options{ + WebhookServer: webhook.NewServer(webhook.Options{ + Port: testenv.WebhookInstallOptions.LocalServingPort, + Host: testenv.WebhookInstallOptions.LocalServingHost, + CertDir: testenv.WebhookInstallOptions.LocalServingCertDir, + TLSOpts: []func(*tls.Config){func(config *tls.Config) {}}, + }), + }) // we need manager here just to leverage manager.SetFields + 
Expect(err).NotTo(HaveOccurred()) + server := m.GetWebhookServer() + server.Register("/failing", &webhook.Admission{Handler: &rejectingValidator{d: admission.NewDecoder(testenv.Scheme)}}) + + go func() { + err := server.Start(ctx) + Expect(err).NotTo(HaveOccurred()) + }() + + Eventually(func() bool { + err := c.Create(ctx, obj) + return err != nil && strings.HasSuffix(err.Error(), "Always denied") && apierrors.ReasonForError(err) == metav1.StatusReasonForbidden + }, 1*time.Second).Should(BeTrue()) + }) + It("should reject create request for multi-webhook that rejects all requests", func(ctx SpecContext) { + m, err := manager.New(cfg, manager.Options{ + Metrics: metricsserver.Options{BindAddress: "0"}, + WebhookServer: webhook.NewServer(webhook.Options{ + Port: testenv.WebhookInstallOptions.LocalServingPort, + Host: testenv.WebhookInstallOptions.LocalServingHost, + CertDir: testenv.WebhookInstallOptions.LocalServingCertDir, + TLSOpts: []func(*tls.Config){func(config *tls.Config) {}}, + }), + }) // we need manager here just to leverage manager.SetFields + Expect(err).NotTo(HaveOccurred()) + server := m.GetWebhookServer() + server.Register("/failing", &webhook.Admission{Handler: admission.MultiValidatingHandler(&rejectingValidator{d: admission.NewDecoder(testenv.Scheme)})}) + + go func() { + defer GinkgoRecover() + err = server.Start(ctx) + Expect(err).NotTo(HaveOccurred()) + }() + + Eventually(func() bool { + err = c.Create(ctx, obj) + return err != nil && strings.HasSuffix(err.Error(), "Always denied") && apierrors.ReasonForError(err) == metav1.StatusReasonForbidden + }, 1*time.Second).Should(BeTrue()) + }) + }) + Context("when running a webhook server without a manager", func() { + It("should reject create request for webhook that rejects all requests", func(ctx SpecContext) { + server := webhook.NewServer(webhook.Options{ + Port: testenv.WebhookInstallOptions.LocalServingPort, + Host: testenv.WebhookInstallOptions.LocalServingHost, + CertDir: 
testenv.WebhookInstallOptions.LocalServingCertDir, + }) + server.Register("/failing", &webhook.Admission{Handler: &rejectingValidator{d: admission.NewDecoder(testenv.Scheme)}}) + + go func() { + err := server.Start(ctx) + Expect(err).NotTo(HaveOccurred()) + }() + + Eventually(func() bool { + err := c.Create(ctx, obj) + return err != nil && strings.HasSuffix(err.Error(), "Always denied") && apierrors.ReasonForError(err) == metav1.StatusReasonForbidden + }, 1*time.Second).Should(BeTrue()) + }) + }) +}) + +type rejectingValidator struct { + d admission.Decoder +} + +func (v *rejectingValidator) Handle(ctx context.Context, req admission.Request) admission.Response { + var obj appsv1.Deployment + if err := v.d.Decode(req, &obj); err != nil { + return admission.Denied(err.Error()) + } + return admission.Denied("Always denied") +} diff --git a/pkg/webhook/webhook_suite_test.go b/pkg/webhook/webhook_suite_test.go index 8e0b4b9f94..ee9c1f4057 100644 --- a/pkg/webhook/webhook_suite_test.go +++ b/pkg/webhook/webhook_suite_test.go @@ -14,16 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -package webhook +package webhook_test import ( + "fmt" "testing" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - - "k8s.io/client-go/kubernetes" + admissionv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -31,28 +33,73 @@ import ( func TestSource(t *testing.T) { RegisterFailHandler(Fail) - RunSpecsWithDefaultAndCustomReporters(t, "Webhook Integration Suite", []Reporter{envtest.NewlineReporter{}}) + RunSpecs(t, "Webhook Integration Suite") } var testenv *envtest.Environment var cfg *rest.Config -var clientset *kubernetes.Clientset -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) testenv = &envtest.Environment{} - + // we're initializing webhook here and not in webhook.go to also test the envtest install code via WebhookOptions + initializeWebhookInEnvironment() var err error cfg, err = testenv.Start() Expect(err).NotTo(HaveOccurred()) - - clientset, err = kubernetes.NewForConfig(cfg) - Expect(err).NotTo(HaveOccurred()) - - close(done) -}, 60) +}) var _ = AfterSuite(func() { + fmt.Println("stopping?") Expect(testenv.Stop()).To(Succeed()) }) + +func initializeWebhookInEnvironment() { + namespacedScopeV1 := admissionv1.NamespacedScope + failedTypeV1 := admissionv1.Fail + equivalentTypeV1 := admissionv1.Equivalent + noSideEffectsV1 := admissionv1.SideEffectClassNone + webhookPathV1 := "/failing" + + testenv.WebhookInstallOptions = envtest.WebhookInstallOptions{ + ValidatingWebhooks: []*admissionv1.ValidatingWebhookConfiguration{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-validation-webhook-config", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "ValidatingWebhookConfiguration", + APIVersion: "admissionregistration.k8s.io/v1", + }, + Webhooks: []admissionv1.ValidatingWebhook{ + { + Name: 
"deployment-validation.kubebuilder.io", + Rules: []admissionv1.RuleWithOperations{ + { + Operations: []admissionv1.OperationType{"CREATE", "UPDATE"}, + Rule: admissionv1.Rule{ + APIGroups: []string{"apps"}, + APIVersions: []string{"v1"}, + Resources: []string{"deployments"}, + Scope: &namespacedScopeV1, + }, + }, + }, + FailurePolicy: &failedTypeV1, + MatchPolicy: &equivalentTypeV1, + SideEffects: &noSideEffectsV1, + ClientConfig: admissionv1.WebhookClientConfig{ + Service: &admissionv1.ServiceReference{ + Name: "deployment-validation-service", + Namespace: "default", + Path: &webhookPathV1, + }, + }, + AdmissionReviewVersions: []string{"v1"}, + }, + }, + }, + }, + } +} diff --git a/tools/setup-envtest/README.md b/tools/setup-envtest/README.md new file mode 100644 index 0000000000..a4de6f3eae --- /dev/null +++ b/tools/setup-envtest/README.md @@ -0,0 +1,119 @@ +# Envtest Binaries Manager + +This is a small tool that manages binaries for envtest. It can be used to +download new binaries, list currently installed and available ones, and +clean up versions. + +To use it, just go-install it with Golang 1.24+ (it's a separate, self-contained +module): + +```shell +go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest +``` + +If you are using Golang 1.23, use the `release-0.20` branch instead: + +```shell +go install sigs.k8s.io/controller-runtime/tools/setup-envtest@release-0.20 +``` + +For full documentation, run it with the `--help` flag, but here are some +examples: + +```shell +# download the latest envtest, and print out info about it +setup-envtest use + +# download the latest 1.19 envtest, and print out the path +setup-envtest use -p path 1.19.x! 
+ +# switch to the most recent 1.21 envtest on disk +source <(setup-envtest use -i -p env 1.21.x) + +# list all available local versions for darwin/amd64 +setup-envtest list -i --os darwin --arch amd64 + +# remove all versions older than 1.16 from disk +setup-envtest cleanup <1.16 + +# use the value from $KUBEBUILDER_ASSETS if set, otherwise follow the normal +# logic for 'use' +setup-envtest --use-env + +# use the value from $KUBEBUILDER_ASSETS if set, otherwise use the latest +# installed version +setup-envtest use -i --use-env + +# sideload a pre-downloaded tarball as Kubernetes 1.16.2 into our store +setup-envtest sideload 1.16.2 < downloaded-envtest.tar.gz + +# Per default envtest binaries are downloaded from: +# https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/master/envtest-releases.yaml +# To download from a custom index use the following: +setup-envtest use --index https://custom.com/envtest-releases.yaml + +``` + +## Where does it put all those binaries? + +By default, binaries are stored in a subdirectory of an OS-specific data +directory, as per the OS's conventions. + +On Linux, this is `$XDG_DATA_HOME`; on Windows, `%LocalAppData`; and on +OSX, `~/Library/Application Support`. + +There's an overall folder that holds all files, and inside that is +a folder for each version/platform pair. The exact directory structure is +not guaranteed, except that the leaf directory will contain the names +expected by envtest. You should always use `setup-envtest fetch` or +`setup-envtest switch` (generally with the `-p path` or `-p env` flags) to +get the directory that you should use. + +## Why do I have to do that `source <(blah blah blah)` thing + +This is a normal binary, not a shell script, so we can't set the parent +process's environment variables. 
If you use this by hand a lot and want +to save the typing, you could put something like the following in your +`~/.zshrc` (or similar for bash/fish/whatever, modified to those): + +```shell +setup-envtest() { + if (($@[(Ie)use])); then + source <($GOPATH/bin/setup-envtest "$@" -p env) + else + $GOPATH/bin/setup-envtest "$@" + fi +} +``` + +## What if I don't want to talk to the internet? + +There are a few options. + +First, you'll probably want to set the `-i/--installed` flag. If you want +to avoid forgetting to set this flag, set the `ENVTEST_INSTALLED_ONLY` +env variable, which will switch that flag on by default. + +Then, you have a few options for managing your binaries: + +- If you don't *really* want to manage with this tool, or you want to + respect the $KUBEBUILDER_ASSETS variable if it's set to something + outside the store, use the `use --use-env -i` command. + + `--use-env` makes the command unconditionally use the value of + KUBEBUILDER_ASSETS as long as it contains the required binaries, and + `-i` indicates that we only ever want to work with installed binaries. + + As noted about, you can use `ENVTEST_INSTALLED_ONLY=true` to switch `-i` + on by default, and you can use `ENVTEST_USE_ENV=true` to switch + `--use-env` on by default. + +- If you want to use this tool, but download your gziped tarballs + separately, you can use the `sideload` command. You'll need to use the + `-k/--version` flag to indicate which version you're sideloading. + + After that, it'll be as if you'd installed the binaries with `use`. 
+ +- If you want to talk to some internal source via HTTP, you can simply set `--index` + The index must contain references to envtest binary archives in the same format as: + https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/master/envtest-releases.yaml diff --git a/tools/setup-envtest/env/env.go b/tools/setup-envtest/env/env.go new file mode 100644 index 0000000000..6168739eb6 --- /dev/null +++ b/tools/setup-envtest/env/env.go @@ -0,0 +1,478 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package env + +import ( + "context" + "errors" + "fmt" + "io" + "io/fs" + "path/filepath" + "sort" + "strings" + "text/tabwriter" + + "github.com/go-logr/logr" + "github.com/spf13/afero" // too bad fs.FS isn't writable :-/ + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// Env represents an environment for downloading and otherwise manipulating +// envtest binaries. +// +// In general, the methods will use the Exit{,Cause} functions from this package +// to indicate errors. Catch them with a `defer HandleExitWithCode()`. +type Env struct { + // the following *must* be set on input + + // Platform is our current platform + Platform versions.PlatformItem + + // VerifySum indicates whether we should run checksums. + VerifySum bool + // NoDownload forces us to not contact remote services, + // looking only at local files instead. + NoDownload bool + // ForceDownload forces us to ignore local files and always + // contact remote services & re-download. + ForceDownload bool + + // Client is our remote client for contacting remote services. + Client remote.Client + + // Log allows us to log. 
+ Log logr.Logger + + // the following *may* be set on input, or may be discovered + + // Version is the version(s) that we want to download + // (may be automatically retrieved later on). + Version versions.Spec + + // Store is used to load/store entries to/from disk. + Store *store.Store + + // FS is the file system to read from/write to for provisioning temp files + // for storing the archives temporarily. + FS afero.Afero + + // Out is the place to write output text to + Out io.Writer + + // manualPath is the manually discovered path from PathMatches, if + // a non-store path was used. It'll be printed by PrintInfo if present. + manualPath string +} + +// CheckCoherence checks that this environment has filled-out, coherent settings +// (e.g. NoDownload & ForceDownload aren't both set). +func (e *Env) CheckCoherence() { + if e.NoDownload && e.ForceDownload { + Exit(2, "cannot both skip downloading *and* force re-downloading") + } + + if e.Platform.OS == "" || e.Platform.Arch == "" { + Exit(2, "must specify non-empty OS and arch (did you specify bad --os or --arch values?)") + } +} + +func (e *Env) filter() store.Filter { + return store.Filter{Version: e.Version, Platform: e.Platform.Platform} +} + +func (e *Env) item() store.Item { + concreteVer := e.Version.AsConcrete() + if concreteVer == nil || e.Platform.IsWildcard() { + panic("no platform/version set") // unexpected, print stack trace + } + return store.Item{Version: *concreteVer, Platform: e.Platform.Platform} +} + +// ListVersions prints out all available versions matching this Env's +// platform & version selector (respecting NoDownload to figure +// out whether or not to match remote versions). 
+func (e *Env) ListVersions(ctx context.Context) { + out := tabwriter.NewWriter(e.Out, 4, 4, 2, ' ', 0) + defer out.Flush() + localVersions, err := e.Store.List(ctx, e.filter()) + if err != nil { + ExitCause(2, err, "unable to list installed versions") + } + for _, item := range localVersions { + // already filtered by onDiskVersions + fmt.Fprintf(out, "(installed)\tv%s\t%s\n", item.Version, item.Platform) + } + + if e.NoDownload { + return + } + + remoteVersions, err := e.Client.ListVersions(ctx) + if err != nil { + ExitCause(2, err, "unable list to available versions") + } + + for _, set := range remoteVersions { + if !e.Version.Matches(set.Version) { + continue + } + sort.Slice(set.Platforms, func(i, j int) bool { + return orderPlatforms(set.Platforms[i].Platform, set.Platforms[j].Platform) + }) + for _, plat := range set.Platforms { + if e.Platform.Matches(plat.Platform) { + fmt.Fprintf(out, "(available)\tv%s\t%s\n", set.Version, plat) + } + } + } +} + +// LatestVersion returns the latest version matching our version selector and +// platform from the remote server, with the corresponding checksum for later +// use as well. 
+func (e *Env) LatestVersion(ctx context.Context) (versions.Concrete, versions.PlatformItem) { + vers, err := e.Client.ListVersions(ctx) + if err != nil { + ExitCause(2, err, "unable to list versions to find latest one") + } + for _, set := range vers { + if !e.Version.Matches(set.Version) { + e.Log.V(1).Info("skipping non-matching version", "version", set.Version) + continue + } + // double-check that our platform is supported + for _, plat := range set.Platforms { + // NB(directxman12): we're already iterating in order, so no + // need to check if the wildcard is latest vs any + if e.Platform.Matches(plat.Platform) && e.Version.Matches(set.Version) { + return set.Version, plat + } + } + e.Log.Info("latest version not supported for your platform, checking older ones", "version", set.Version, "platform", e.Platform) + } + + Exit(2, "unable to find a version that was supported for platform %s", e.Platform) + return versions.Concrete{}, versions.PlatformItem{} // unreachable, but Go's type system can't express the "never" type +} + +// ExistsAndValid checks if our current (concrete) version & platform +// exist on disk (unless ForceDownload is set, in which cause it always +// returns false). +// +// Must be called after EnsureVersionIsSet so that we have a concrete +// Version selected. Must have a concrete platform, or ForceDownload +// must be set. +func (e *Env) ExistsAndValid() bool { + if e.ForceDownload { + // we always want to download, so don't check here + return false + } + + if e.Platform.IsWildcard() { + Exit(2, "you must have a concrete platform with this command -- you cannot use wildcard platforms with fetch or switch") + } + + exists, err := e.Store.Has(e.item()) + if err != nil { + ExitCause(2, err, "unable to check if existing version exists") + } + + if exists { + e.Log.Info("applicable version found on disk", "version", e.Version) + } + return exists +} + +// EnsureVersionIsSet ensures that we have a non-wildcard version +// configured. 
+// +// If necessary, it will enumerate on-disk and remote versions to accomplish +// this, finding a version that matches our version selector and platform. +// It will always yield a concrete version, it *may* yield a concrete platform +// as well. +func (e *Env) EnsureVersionIsSet(ctx context.Context) { + if e.Version.AsConcrete() != nil { + return + } + var localVer *versions.Concrete + var localPlat versions.Platform + + items, err := e.Store.List(ctx, e.filter()) + if err != nil { + ExitCause(2, err, "unable to determine installed versions") + } + + for _, item := range items { + if !e.Version.Matches(item.Version) || !e.Platform.Matches(item.Platform) { + e.Log.V(1).Info("skipping version, doesn't match", "version", item.Version, "platform", item.Platform) + continue + } + // NB(directxman12): we're already iterating in order, so no + // need to check if the wildcard is latest vs any + ver := item.Version // copy to avoid referencing iteration variable + localVer = &ver + localPlat = item.Platform + break + } + + if e.NoDownload || !e.Version.CheckLatest { + // no version specified, but we either + // + // a) shouldn't contact remote + // b) don't care to find the absolute latest + // + // so just find the latest local version + if localVer != nil { + e.Version.MakeConcrete(*localVer) + e.Platform.Platform = localPlat + return + } + if e.NoDownload { + Exit(2, "no applicable on-disk versions for %s found, you'll have to download one, or run list -i to see what you do have", e.Platform) + } + // if we didn't ask for the latest version, but don't have anything + // available, try the internet ;-) + } + + // no version specified and we need the latest in some capacity, so find latest from remote + // so find the latest local first, then compare it to the latest remote, and use whichever + // of the two is more recent. 
+ e.Log.Info("no version specified, finding latest") + serverVer, platform := e.LatestVersion(ctx) + + // if we're not forcing a download, and we have a newer local version, just use that + if !e.ForceDownload && localVer != nil && localVer.NewerThan(serverVer) { + e.Platform.Platform = localPlat // update our data with hash + e.Version.MakeConcrete(*localVer) + return + } + + // otherwise, use the new version from the server + e.Platform = platform // update our data with hash + e.Version.MakeConcrete(serverVer) +} + +// Fetch ensures that the requested platform and version are on disk. +// You must call EnsureVersionIsSet before calling this method. +// +// If ForceDownload is set, we always download, otherwise we only download +// if we're missing the version on disk. +func (e *Env) Fetch(ctx context.Context) { + log := e.Log.WithName("fetch") + + // if we didn't just fetch it, grab the sum to verify + if e.VerifySum && e.Platform.Hash == nil { + if err := e.Client.FetchSum(ctx, *e.Version.AsConcrete(), &e.Platform); err != nil { + ExitCause(2, err, "unable to fetch hash for requested version") + } + } + if !e.VerifySum { + e.Platform.Hash = nil // skip verification + } + + var packedPath string + + // cleanup on error (needs to be here so it will happen after the other defers) + defer e.cleanupOnError(func() { + if packedPath != "" { + e.Log.V(1).Info("cleaning up downloaded archive", "path", packedPath) + if err := e.FS.Remove(packedPath); err != nil && !errors.Is(err, fs.ErrNotExist) { + e.Log.Error(err, "unable to clean up archive path", "path", packedPath) + } + } + }) + + archiveOut, err := e.FS.TempFile("", "*-"+e.Platform.ArchiveName(*e.Version.AsConcrete())) + if err != nil { + ExitCause(2, err, "unable to open file to write downloaded archive to") + } + defer archiveOut.Close() + packedPath = archiveOut.Name() + log.V(1).Info("writing downloaded archive", "path", packedPath) + + if err := e.Client.GetVersion(ctx, *e.Version.AsConcrete(), e.Platform, 
archiveOut); err != nil { + ExitCause(2, err, "unable to download requested version") + } + log.V(1).Info("downloaded archive", "path", packedPath) + + if err := archiveOut.Sync(); err != nil { // sync before reading back + ExitCause(2, err, "unable to flush downloaded archive file") + } + if _, err := archiveOut.Seek(0, 0); err != nil { + ExitCause(2, err, "unable to jump back to beginning of archive file to unzip") + } + + if err := e.Store.Add(ctx, e.item(), archiveOut); err != nil { + ExitCause(2, err, "unable to store version to disk") + } + + log.V(1).Info("removing archive from disk", "path", packedPath) + if err := e.FS.Remove(packedPath); err != nil { + // don't bail, this isn't fatal + log.Error(err, "unable to remove downloaded archive", "path", packedPath) + } +} + +// cleanup on error cleans up if we hit an exitCode error. +// +// Use it in a defer. +func (e *Env) cleanupOnError(extraCleanup func()) { + cause := recover() + if cause == nil { + return + } + // don't panic in a panic handler + var exit *exitCode + if asExit(cause, &exit) && exit.code != 0 { + e.Log.Info("cleaning up due to error") + // we already log in the function, and don't want to panic, so + // ignore the error + extraCleanup() + } + panic(cause) // re-start the panic now that we're done +} + +// Remove removes the data for our version selector & platform from disk. +func (e *Env) Remove(ctx context.Context) { + items, err := e.Store.Remove(ctx, e.filter()) + for _, item := range items { + fmt.Fprintf(e.Out, "removed %s\n", item) + } + if err != nil { + ExitCause(2, err, "unable to remove all requested version(s)") + } +} + +// PrintInfo prints out information about a single, current version +// and platform, according to the given formatting info. 
+func (e *Env) PrintInfo(printFmt PrintFormat) { + // use the manual path if it's set, otherwise use the standard path + path := e.manualPath + if e.manualPath == "" { + item := e.item() + var err error + path, err = e.Store.Path(item) + if err != nil { + ExitCause(2, err, "unable to get path for version %s", item) + } + } + switch printFmt { + case PrintOverview: + fmt.Fprintf(e.Out, "Version: %s\n", e.Version) + fmt.Fprintf(e.Out, "OS/Arch: %s\n", e.Platform) + if e.Platform.Hash != nil { + fmt.Fprintf(e.Out, "%s: %s\n", e.Platform.Hash.Type, e.Platform.Hash.Value) + } + fmt.Fprintf(e.Out, "Path: %s\n", path) + case PrintPath: + fmt.Fprint(e.Out, path) // NB(directxman12): no newline -- want the bare path here + case PrintEnv: + // quote in case there are spaces, etc in the path + // the weird string below works like this: + // - you can't escape quotes in shell + // - shell strings that are next to each other are concatenated (so "a""b""c" == "abc") + // - you can intermix quote styles using the above + // - so `'"'"'` --> CLOSE_QUOTE + "'" + OPEN_QUOTE + shellQuoted := strings.ReplaceAll(path, "'", `'"'"'`) + fmt.Fprintf(e.Out, "export KUBEBUILDER_ASSETS='%s'\n", shellQuoted) + default: + panic(fmt.Sprintf("unexpected print format %v", printFmt)) + } +} + +// EnsureBaseDirs ensures that the base packed and unpacked directories +// exist. +// +// This should be the first thing called after CheckCoherence. +func (e *Env) EnsureBaseDirs(ctx context.Context) { + if err := e.Store.Initialize(ctx); err != nil { + ExitCause(2, err, "unable to make sure store is initialized") + } +} + +// Sideload takes an input stream, and loads it as if it had been a downloaded .tar.gz file +// for the current *concrete* version and platform. +func (e *Env) Sideload(ctx context.Context, input io.Reader) { + log := e.Log.WithName("sideload") + if e.Version.AsConcrete() == nil || e.Platform.IsWildcard() { + Exit(2, "must specify a concrete version and platform to sideload. 
Make sure you've passed a version, like 'sideload 1.21.0'") + } + log.V(1).Info("sideloading from input stream to version", "version", e.Version, "platform", e.Platform) + if err := e.Store.Add(ctx, e.item(), input); err != nil { + ExitCause(2, err, "unable to sideload item to disk") + } +} + +var ( + // expectedExecutables are the executables that are checked in PathMatches + // for non-store paths. + expectedExecutables = []string{ + "kube-apiserver", + "etcd", + "kubectl", + } +) + +// PathMatches checks if the path (e.g. from the environment variable) +// matches this version & platform selector, and if so, returns true. +func (e *Env) PathMatches(value string) bool { + e.Log.V(1).Info("checking if (env var) path represents our desired version", "path", value) + if value == "" { + // if we're unset, + return false + } + + if e.versionFromPathName(value) { + e.Log.V(1).Info("path appears to be in our store, using that info", "path", value) + return true + } + + e.Log.V(1).Info("path is not in our store, checking for binaries", "path", value) + for _, expected := range expectedExecutables { + _, err := e.FS.Stat(filepath.Join(value, expected)) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // one of our required binaries is missing, return false + e.Log.V(1).Info("missing required binary in (env var) path", "binary", expected, "path", value) + return false + } + ExitCause(2, err, "unable to check for existence of binary %s from existing (env var) path %s", value, expected) + } + } + + // success, all binaries present + e.Log.V(1).Info("all required binaries present in (env var) path, using that", "path", value) + + // don't bother checking the version, the user explicitly asked us to use this + // we don't know the version, so set it to wildcard + e.Version = versions.AnyVersion + e.Platform.OS = "*" + e.Platform.Arch = "*" + e.manualPath = value + return true +} + +// versionFromPathName checks if the given path's last component looks like one +// of 
our versions, and, if so, what version it represents. If successful, +// it'll set version and platform, and return true. Otherwise it returns +// false. +func (e *Env) versionFromPathName(value string) bool { + baseName := filepath.Base(value) + ver, pl := versions.ExtractWithPlatform(versions.VersionPlatformRE, baseName) + if ver == nil { + // not a version that we can tell + return false + } + + // yay we got a version! + e.Version.MakeConcrete(*ver) + e.Platform.Platform = pl + e.manualPath = value // might be outside our store, set this just in case + + return true +} diff --git a/tools/setup-envtest/env/env_suite_test.go b/tools/setup-envtest/env/env_suite_test.go new file mode 100644 index 0000000000..3400dd91aa --- /dev/null +++ b/tools/setup-envtest/env/env_suite_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package env_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var testLog logr.Logger + +func zapLogger() logr.Logger { + testOut := zapcore.AddSync(GinkgoWriter) + enc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()) + // bleh setting up logging to the ginkgo writer is annoying + zapLog := zap.New(zapcore.NewCore(enc, testOut, zap.DebugLevel), + zap.ErrorOutput(testOut), zap.Development(), zap.AddStacktrace(zap.WarnLevel)) + return zapr.NewLogger(zapLog) +} + +func TestEnv(t *testing.T) { + testLog = zapLogger() + + RegisterFailHandler(Fail) + RunSpecs(t, "Env Suite") +} diff --git a/tools/setup-envtest/env/env_test.go b/tools/setup-envtest/env/env_test.go new file mode 100644 index 0000000000..fd6e7633bd --- /dev/null +++ b/tools/setup-envtest/env/env_test.go @@ -0,0 +1,108 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package env_test + +import ( + "bytes" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/spf13/afero" + + . 
"sigs.k8s.io/controller-runtime/tools/setup-envtest/env" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +var _ = Describe("Env", func() { + // Most of the rest of this is tested e2e via the workflows test, + // but there's a few things that are easier to test here. Eventually + // we should maybe move some of the tests here. + var ( + env *Env + outBuffer *bytes.Buffer + ) + BeforeEach(func() { + outBuffer = new(bytes.Buffer) + env = &Env{ + Out: outBuffer, + Log: testLog, + + Store: &store.Store{ + // use spaces and quotes to test our quote escaping below + Root: afero.NewBasePathFs(afero.NewMemMapFs(), "/kb's test store"), + }, + + // shouldn't use these, but just in case + NoDownload: true, + FS: afero.Afero{Fs: afero.NewMemMapFs()}, + } + + env.Version.MakeConcrete(versions.Concrete{ + Major: 1, Minor: 21, Patch: 3, + }) + env.Platform.Platform = versions.Platform{ + OS: "linux", Arch: "amd64", + } + }) + + Describe("printing", func() { + It("should use a manual path if one is present", func() { + By("using a manual path") + Expect(env.PathMatches("/otherstore/1.21.4-linux-amd64")).To(BeTrue()) + + By("checking that that path is printed properly") + env.PrintInfo(PrintPath) + Expect(outBuffer.String()).To(Equal("/otherstore/1.21.4-linux-amd64")) + }) + + Context("as human-readable info", func() { + BeforeEach(func() { + env.PrintInfo(PrintOverview) + }) + + It("should contain the version", func() { + Expect(outBuffer.String()).To(ContainSubstring("/kb's test store/k8s/1.21.3-linux-amd64")) + }) + It("should contain the path", func() { + Expect(outBuffer.String()).To(ContainSubstring("1.21.3")) + }) + It("should contain the platform", func() { + Expect(outBuffer.String()).To(ContainSubstring("linux/amd64")) + }) + + }) + Context("as just a path", func() { + It("should print out just the path", func() { + env.PrintInfo(PrintPath) + Expect(outBuffer.String()).To(Equal(`/kb's test 
store/k8s/1.21.3-linux-amd64`)) + }) + }) + + Context("as env vars", func() { + BeforeEach(func() { + env.PrintInfo(PrintEnv) + }) + It("should set KUBEBUILDER_ASSETS", func() { + Expect(outBuffer.String()).To(HavePrefix("export KUBEBUILDER_ASSETS=")) + }) + It("should quote the return path, escaping quotes to deal with spaces, etc", func() { + Expect(outBuffer.String()).To(HaveSuffix(`='/kb'"'"'s test store/k8s/1.21.3-linux-amd64'` + "\n")) + }) + }) + }) +}) diff --git a/tools/setup-envtest/env/exit.go b/tools/setup-envtest/env/exit.go new file mode 100644 index 0000000000..ae393b593b --- /dev/null +++ b/tools/setup-envtest/env/exit.go @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package env + +import ( + "errors" + "fmt" + "os" +) + +// Exit exits with the given code and error message. +// +// Defer HandleExitWithCode in main to catch this and get the right behavior. +func Exit(code int, msg string, args ...interface{}) { + panic(&exitCode{ + code: code, + err: fmt.Errorf(msg, args...), + }) +} + +// ExitCause exits with the given code and error message, automatically +// wrapping the underlying error passed as well. +// +// Defer HandleExitWithCode in main to catch this and get the right behavior. +func ExitCause(code int, err error, msg string, args ...interface{}) { + args = append(args, err) + panic(&exitCode{ + code: code, + err: fmt.Errorf(msg+": %w", args...), + }) +} + +// exitCode is an error that indicates, on a panic, to exit with the given code +// and message. +type exitCode struct { + code int + err error +} + +func (c *exitCode) Error() string { + return fmt.Sprintf("%v (exit code %d)", c.err, c.code) +} +func (c *exitCode) Unwrap() error { + return c.err +} + +// asExit checks if the given (panic) value is an exitCode error, +// and if so stores it in the given pointer. It's roughly analogous +// to errors.As, except it works on recover() values. 
+func asExit(val interface{}, exit **exitCode) bool { + if val == nil { + return false + } + err, isErr := val.(error) + if !isErr { + return false + } + if !errors.As(err, exit) { + return false + } + return true +} + +// HandleExitWithCode handles panics of type exitCode, +// printing the status message and existing with the given +// exit code, or re-raising if not an exitCode error. +// +// This should be the first defer in your main function. +func HandleExitWithCode() { + if cause := recover(); CheckRecover(cause, func(code int, err error) { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(code) + }) { + panic(cause) + } +} + +// CheckRecover checks the value of cause, calling the given callback +// if it's an exitCode error. It returns true if we should re-panic +// the cause. +// +// It's mainly useful for testing, normally you'd use HandleExitWithCode. +func CheckRecover(cause interface{}, cb func(int, error)) bool { + if cause == nil { + return false + } + var exitErr *exitCode + if !asExit(cause, &exitErr) { + // re-raise if it's not an exit error + return true + } + + cb(exitErr.code, exitErr.err) + return false +} diff --git a/tools/setup-envtest/env/helpers.go b/tools/setup-envtest/env/helpers.go new file mode 100644 index 0000000000..2c98c88d95 --- /dev/null +++ b/tools/setup-envtest/env/helpers.go @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package env + +import ( + "fmt" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// orderPlatforms orders platforms by OS then arch. +func orderPlatforms(first, second versions.Platform) bool { + // sort by OS, then arch + if first.OS != second.OS { + return first.OS < second.OS + } + return first.Arch < second.Arch +} + +// PrintFormat indicates how to print out fetch and switch results. +// It's a valid pflag.Value so it can be used as a flag directly. 
+type PrintFormat int + +const ( + // PrintOverview prints human-readable data, + // including path, version, arch, and checksum (when available). + PrintOverview PrintFormat = iota + // PrintPath prints *only* the path, with no decoration. + PrintPath + // PrintEnv prints the path with the corresponding env variable, so that + // you can source the output like + // `source $(fetch-envtest switch -p env 1.20.x)`. + PrintEnv +) + +func (f PrintFormat) String() string { + switch f { + case PrintOverview: + return "overview" + case PrintPath: + return "path" + case PrintEnv: + return "env" + default: + panic(fmt.Sprintf("unexpected print format %d", int(f))) + } +} + +// Set sets the value of this as a flag. +func (f *PrintFormat) Set(val string) error { + switch val { + case "overview": + *f = PrintOverview + case "path": + *f = PrintPath + case "env": + *f = PrintEnv + default: + return fmt.Errorf("unknown print format %q, use one of overview|path|env", val) + } + return nil +} + +// Type is the type of this value as a flag. 
+func (PrintFormat) Type() string { + return "{overview|path|env}" +} diff --git a/tools/setup-envtest/go.mod b/tools/setup-envtest/go.mod new file mode 100644 index 0000000000..15c64f8b57 --- /dev/null +++ b/tools/setup-envtest/go.mod @@ -0,0 +1,29 @@ +module sigs.k8s.io/controller-runtime/tools/setup-envtest + +go 1.24.0 + +require ( + github.com/go-logr/logr v1.4.2 + github.com/go-logr/zapr v1.3.0 + github.com/onsi/ginkgo/v2 v2.22.2 + github.com/onsi/gomega v1.36.2 + github.com/spf13/afero v1.12.0 + github.com/spf13/pflag v1.0.6 + go.uber.org/zap v1.27.0 + k8s.io/apimachinery v0.34.1 + sigs.k8s.io/yaml v1.6.0 +) + +require ( + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + go.uber.org/multierr v1.10.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/tools v0.28.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/tools/setup-envtest/go.sum b/tools/setup-envtest/go.sum new file mode 100644 index 0000000000..dfc8e7cce2 --- /dev/null +++ b/tools/setup-envtest/go.sum @@ -0,0 +1,52 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod 
h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= +github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak 
v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= +k8s.io/apimachinery v0.34.1/go.mod 
h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/tools/setup-envtest/main.go b/tools/setup-envtest/main.go new file mode 100644 index 0000000000..7eb5ec43d3 --- /dev/null +++ b/tools/setup-envtest/main.go @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package main + +import ( + goflag "flag" + "fmt" + "os" + "runtime" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "github.com/spf13/afero" + flag "github.com/spf13/pflag" + "go.uber.org/zap" + + envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows" +) + +const ( + // envNoDownload is an env variable that can be set to always force + // the --installed-only, -i flag to be set. + envNoDownload = "ENVTEST_INSTALLED_ONLY" + // envUseEnv is an env variable that can be set to control the --use-env + // flag globally. + envUseEnv = "ENVTEST_USE_ENV" +) + +var ( + force = flag.Bool("force", false, "force re-downloading dependencies, even if they're already present and correct") + installedOnly = flag.BoolP("installed-only", "i", os.Getenv(envNoDownload) != "", + "only look at installed versions -- do not query the remote API server, "+ + "and error out if it would be necessary to") + verify = flag.Bool("verify", true, "verify dependencies while downloading") + useEnv = flag.Bool("use-env", os.Getenv(envUseEnv) != "", "whether to return the value of KUBEBUILDER_ASSETS if it's already set") + + targetOS = flag.String("os", runtime.GOOS, "os to download for (e.g. 
linux, darwin, for listing operations, use '*' to list all platforms)") + targetArch = flag.String("arch", runtime.GOARCH, "architecture to download for (e.g. amd64, for listing operations, use '*' to list all platforms)") + + // printFormat is the flag value for -p, --print. + printFormat = envp.PrintOverview + // zapLvl is the flag value for logging verbosity. + zapLvl = zap.WarnLevel + + binDir = flag.String("bin-dir", "", + "directory to store binary assets (default: $OS_SPECIFIC_DATA_DIR/envtest-binaries)") + + index = flag.String("index", remote.DefaultIndexURL, "index to discover envtest binaries") +) + +// TODO(directxman12): handle interrupts? + +// setupLogging configures a Zap logger. +func setupLogging() logr.Logger { + logCfg := zap.NewDevelopmentConfig() + logCfg.Level = zap.NewAtomicLevelAt(zapLvl) + zapLog, err := logCfg.Build() + if err != nil { + envp.ExitCause(1, err, "who logs the logger errors?") + } + return zapr.NewLogger(zapLog) +} + +// setupEnv initializes the environment from flags. 
+func setupEnv(globalLog logr.Logger, version string) *envp.Env { + log := globalLog.WithName("setup") + if *binDir == "" { + dataDir, err := store.DefaultStoreDir() + if err != nil { + envp.ExitCause(1, err, "unable to deterimine default binaries directory (use --bin-dir to manually override)") + } + + *binDir = dataDir + } + log.V(1).Info("using binaries directory", "dir", *binDir) + + client := &remote.HTTPClient{ + Log: globalLog.WithName("storage-client"), + IndexURL: *index, + } + log.V(1).Info("using HTTP client", "index", *index) + + env := &envp.Env{ + Log: globalLog, + Client: client, + VerifySum: *verify, + ForceDownload: *force, + NoDownload: *installedOnly, + Platform: versions.PlatformItem{ + Platform: versions.Platform{ + OS: *targetOS, + Arch: *targetArch, + }, + }, + FS: afero.Afero{Fs: afero.NewOsFs()}, + Store: store.NewAt(*binDir), + Out: os.Stdout, + } + + switch version { + case "", "latest": + env.Version = versions.LatestVersion + case "latest-on-disk": + // we sort by version, latest first, so this'll give us the latest on + // disk (as per the contract from env.List & store.List) + env.Version = versions.AnyVersion + env.NoDownload = true + default: + var err error + env.Version, err = versions.FromExpr(version) + if err != nil { + envp.ExitCause(1, err, "version be a valid version, or simply 'latest' or 'latest-on-disk'") + } + } + + env.CheckCoherence() + + return env +} + +func main() { + // exit with appropriate error codes -- this should be the first defer so + // that it's the last one executed. + defer envp.HandleExitWithCode() + + // set up flags + flag.Usage = func() { + name := os.Args[0] + fmt.Fprintf(os.Stderr, "Usage: %s [FLAGS] use|list|cleanup|sideload [VERSION]\n", name) + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, + ` +Note: this command is currently alpha, and the usage/behavior may change from release to release. 
+ +Examples: + + # download the latest envtest, and print out info about it + %[1]s use + + # download the latest 1.19 envtest, and print out the path + %[1]s use -p path 1.19.x! + + # switch to the most recent 1.21 envtest on disk + source <(%[1]s use -i -p env 1.21.x) + + # list all available local versions for darwin/amd64 + %[1]s list -i --os darwin --arch amd64 + + # remove all versions older than 1.16 from disk + %[1]s cleanup <1.16 + + # use the value from $KUBEBUILDER_ASSETS if set, otherwise follow the normal + # logic for 'use' + %[1]s --use-env + + # use the value from $KUBEBUILDER_ASSETS if set, otherwise use the latest + # installed version + %[1]s use -i --use-env + + # sideload a pre-downloaded tarball as Kubernetes 1.16.2 into our store + %[1]s sideload 1.16.2 < downloaded-envtest.tar.gz + +Commands: + + use: + get information for the requested version, downloading it if necessary and allowed. + Needs a concrete platform (no wildcards), but wildcard versions are supported. + + list: + list installed *and* available versions matching the given version & platform. + May have wildcard versions *and* platforms. + If the -i flag is passed, only installed versions are listed. + + cleanup: + remove all versions matching the given version & platform selector. + May have wildcard versions *and* platforms. + + sideload: + reads a .tar.gz file from stdin and expand it into the store. + must have a concrete version and platform. + + version: + list the installed version of setup-envtest. + +Versions: + + Versions take the form of a small subset of semver selectors. + + Basic semver whole versions are accepted: X.Y.Z. + Z may also be '*' or 'x' to match a wildcard. + You may also just write X.Y, which means X.Y.*. + + A version may be prefixed with '~' to match the most recent Z release + in the given Y release ( [X.Y.Z, X.Y+1.0) ). + + Finally, you may suffix the version with '!' to force checking the + remote API server for the latest version. 
+ + For example: + + 1.16.x / 1.16.* / 1.16 # any 1.16 version + ~1.19.3 # any 1.19 version that's at least 1.19.3 + <1.17 # any release 1.17.x or below + 1.22.x! # the latest one 1.22 release available remotely + +Output: + + The fetch & switch commands respect the --print, -p flag. + + overview: human readable information + path: print out the path, by itself + env: print out the path in a form that can be sourced to use that version with envtest + + Other command have human-readable output formats only. + +Environment Variables: + + KUBEBUILDER_ASSETS: + --use-env will check this, and '-p/--print env' will return this. + If --use-env is true and this is set, we won't check our store + for versions -- we'll just immediately return whatever's in + this env var. + + %[2]s: + will switch the default of -i/--installed to true if set to any value + + %[3]s: + will switch the default of --use-env to true if set to any value + +`, name, envNoDownload, envUseEnv) + } + flag.CommandLine.AddGoFlag(&goflag.Flag{Name: "v", Usage: "logging level", Value: &zapLvl}) + flag.VarP(&printFormat, "print", "p", "what info to print after fetch-style commands (overview, path, env)") + needHelp := flag.Bool("help", false, "print out this help text") // register help so that we don't get an error at the end + flag.Parse() + + if *needHelp { + flag.Usage() + envp.Exit(2, "") + } + + // check our argument count + if numArgs := flag.NArg(); numArgs < 1 || numArgs > 2 { + flag.Usage() + envp.Exit(2, "please specify a command to use, and optionally a version selector") + } + + // set up logging + globalLog := setupLogging() + + // set up the environment + var version string + if flag.NArg() > 1 { + version = flag.Arg(1) + } + env := setupEnv(globalLog, version) + // perform our main set of actions + switch action := flag.Arg(0); action { + case "use": + workflows.Use{ + UseEnv: *useEnv, + PrintFormat: printFormat, + AssetsPath: os.Getenv("KUBEBUILDER_ASSETS"), + }.Do(env) + case "list": + 
workflows.List{}.Do(env) + case "cleanup": + workflows.Cleanup{}.Do(env) + case "sideload": + workflows.Sideload{ + Input: os.Stdin, + PrintFormat: printFormat, + }.Do(env) + case "version": + workflows.Version{}.Do(env) + default: + flag.Usage() + envp.Exit(2, "unknown action %q", action) + } +} diff --git a/tools/setup-envtest/remote/client.go b/tools/setup-envtest/remote/client.go new file mode 100644 index 0000000000..24efd6daff --- /dev/null +++ b/tools/setup-envtest/remote/client.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 The Kubernetes Authors + +package remote + +import ( + "context" + "io" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// Client is an interface to get and list envtest binary archives. +type Client interface { + ListVersions(ctx context.Context) ([]versions.Set, error) + + GetVersion(ctx context.Context, version versions.Concrete, platform versions.PlatformItem, out io.Writer) error + + FetchSum(ctx context.Context, ver versions.Concrete, pl *versions.PlatformItem) error +} diff --git a/tools/setup-envtest/remote/http_client.go b/tools/setup-envtest/remote/http_client.go new file mode 100644 index 0000000000..0339654a82 --- /dev/null +++ b/tools/setup-envtest/remote/http_client.go @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package remote + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "sort" + + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" + "sigs.k8s.io/yaml" +) + +// DefaultIndexURL is the default index used in HTTPClient. +var DefaultIndexURL = "https://raw.githubusercontent.com/kubernetes-sigs/controller-tools/HEAD/envtest-releases.yaml" + +var _ Client = &HTTPClient{} + +// HTTPClient is a client for fetching versions of the envtest binary archives +// from an index via HTTP. +type HTTPClient struct { + // Log allows us to log. 
+ Log logr.Logger + + // IndexURL is the URL of the index, defaults to DefaultIndexURL. + IndexURL string +} + +// Index represents an index of envtest binary archives. Example: +// +// releases: +// v1.28.0: +// envtest-v1.28.0-darwin-amd64.tar.gz: +// hash: +// selfLink: +type Index struct { + // Releases maps Kubernetes versions to Releases (envtest archives). + Releases map[string]Release `json:"releases"` +} + +// Release maps an archive name to an archive. +type Release map[string]Archive + +// Archive contains the self link to an archive and its hash. +type Archive struct { + Hash string `json:"hash"` + SelfLink string `json:"selfLink"` +} + +// ListVersions lists all available tools versions in the index, along +// with supported os/arch combos and the corresponding hash. +// +// The results are sorted with newer versions first. +func (c *HTTPClient) ListVersions(ctx context.Context) ([]versions.Set, error) { + index, err := c.getIndex(ctx) + if err != nil { + return nil, err + } + + knownVersions := map[versions.Concrete][]versions.PlatformItem{} + for _, releases := range index.Releases { + for archiveName, archive := range releases { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, archiveName) + if ver == nil { + c.Log.V(1).Info("skipping archive -- does not appear to be a versioned tools archive", "name", archiveName) + continue + } + c.Log.V(1).Info("found version", "version", ver, "platform", details) + knownVersions[*ver] = append(knownVersions[*ver], versions.PlatformItem{ + Platform: details, + Hash: &versions.Hash{ + Type: versions.SHA512HashType, + Encoding: versions.HexHashEncoding, + Value: archive.Hash, + }, + }) + } + } + + res := make([]versions.Set, 0, len(knownVersions)) + for ver, details := range knownVersions { + res = append(res, versions.Set{Version: ver, Platforms: details}) + } + // sort in inverse order so that the newest one is first + sort.Slice(res, func(i, j int) bool { + first, second := res[i].Version, 
res[j].Version + return first.NewerThan(second) + }) + + return res, nil +} + +// GetVersion downloads the given concrete version for the given concrete platform, writing it to the out. +func (c *HTTPClient) GetVersion(ctx context.Context, version versions.Concrete, platform versions.PlatformItem, out io.Writer) error { + index, err := c.getIndex(ctx) + if err != nil { + return err + } + + var loc *url.URL + var name string + for _, releases := range index.Releases { + for archiveName, archive := range releases { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, archiveName) + if ver == nil { + c.Log.V(1).Info("skipping archive -- does not appear to be a versioned tools archive", "name", archiveName) + continue + } + + if *ver == version && details.OS == platform.OS && details.Arch == platform.Arch { + loc, err = url.Parse(archive.SelfLink) + if err != nil { + return fmt.Errorf("error parsing selfLink %q, %w", loc, err) + } + name = archiveName + break + } + } + } + if name == "" { + return fmt.Errorf("unable to find archive for %s (%s,%s)", version, platform.OS, platform.Arch) + } + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return fmt.Errorf("unable to construct request to fetch %s: %w", name, err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("unable to fetch %s (%s): %w", name, req.URL, err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return fmt.Errorf("unable fetch %s (%s) -- got status %q", name, req.URL, resp.Status) + } + + return readBody(resp, out, name, platform) +} + +// FetchSum fetches the checksum for the given concrete version & platform into +// the given platform item. 
+func (c *HTTPClient) FetchSum(ctx context.Context, version versions.Concrete, platform *versions.PlatformItem) error { + index, err := c.getIndex(ctx) + if err != nil { + return err + } + + for _, releases := range index.Releases { + for archiveName, archive := range releases { + ver, details := versions.ExtractWithPlatform(versions.ArchiveRE, archiveName) + if ver == nil { + c.Log.V(1).Info("skipping archive -- does not appear to be a versioned tools archive", "name", archiveName) + continue + } + + if *ver == version && details.OS == platform.OS && details.Arch == platform.Arch { + platform.Hash = &versions.Hash{ + Type: versions.SHA512HashType, + Encoding: versions.HexHashEncoding, + Value: archive.Hash, + } + return nil + } + } + } + + return fmt.Errorf("unable to find archive for %s (%s,%s)", version, platform.OS, platform.Arch) +} + +func (c *HTTPClient) getIndex(ctx context.Context) (*Index, error) { + indexURL := c.IndexURL + if indexURL == "" { + indexURL = DefaultIndexURL + } + + loc, err := url.Parse(indexURL) + if err != nil { + return nil, fmt.Errorf("unable to parse index URL: %w", err) + } + + c.Log.V(1).Info("listing versions", "index", indexURL) + + req, err := http.NewRequestWithContext(ctx, "GET", loc.String(), nil) + if err != nil { + return nil, fmt.Errorf("unable to construct request to get index: %w", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("unable to perform request to get index: %w", err) + } + + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, fmt.Errorf("unable to get index -- got status %q", resp.Status) + } + + responseBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to get index -- unable to read body %w", err) + } + + var index Index + if err := yaml.Unmarshal(responseBody, &index); err != nil { + return nil, fmt.Errorf("unable to unmarshal index: %w", err) + } + return &index, nil +} diff --git 
a/tools/setup-envtest/remote/read_body.go b/tools/setup-envtest/remote/read_body.go new file mode 100644 index 0000000000..1c71102897 --- /dev/null +++ b/tools/setup-envtest/remote/read_body.go @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2024 The Kubernetes Authors + +package remote + +import ( + "crypto/md5" + "crypto/sha512" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "net/http" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +func readBody(resp *http.Response, out io.Writer, archiveName string, platform versions.PlatformItem) error { + if platform.Hash != nil { + // stream in chunks to do the checksum, don't load the whole thing into + // memory to avoid causing issues with big files. + buf := make([]byte, 32*1024) // 32KiB, same as io.Copy + var hasher hash.Hash + switch platform.Hash.Type { + case versions.SHA512HashType: + hasher = sha512.New() + case versions.MD5HashType: + hasher = md5.New() + default: + return fmt.Errorf("hash type %s not implemented", platform.Hash.Type) + } + for cont := true; cont; { + amt, err := resp.Body.Read(buf) + if err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("unable read next chunk of %s: %w", archiveName, err) + } + if amt > 0 { + // checksum never returns errors according to docs + hasher.Write(buf[:amt]) + if _, err := out.Write(buf[:amt]); err != nil { + return fmt.Errorf("unable write next chunk of %s: %w", archiveName, err) + } + } + cont = amt > 0 && !errors.Is(err, io.EOF) + } + + var sum string + switch platform.Hash.Encoding { + case versions.Base64HashEncoding: + sum = base64.StdEncoding.EncodeToString(hasher.Sum(nil)) + case versions.HexHashEncoding: + sum = hex.EncodeToString(hasher.Sum(nil)) + default: + return fmt.Errorf("hash encoding %s not implemented", platform.Hash.Encoding) + } + if sum != platform.Hash.Value { + return fmt.Errorf("checksum mismatch for %s: %s (computed) != %s (reported)", archiveName, sum, 
platform.Hash.Value) + } + } else if _, err := io.Copy(out, resp.Body); err != nil { + return fmt.Errorf("unable to download %s: %w", archiveName, err) + } + return nil +} diff --git a/tools/setup-envtest/store/helpers.go b/tools/setup-envtest/store/helpers.go new file mode 100644 index 0000000000..30902187e9 --- /dev/null +++ b/tools/setup-envtest/store/helpers.go @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package store + +import ( + "errors" + "os" + "path/filepath" + "runtime" +) + +// DefaultStoreDir returns the default location for the store. +// It's dependent on operating system: +// +// - Windows: %LocalAppData%\kubebuilder-envtest +// - OSX: ~/Library/Application Support/io.kubebuilder.envtest +// - Others: ${XDG_DATA_HOME:-~/.local/share}/kubebuilder-envtest +// +// Otherwise, it errors out. Note that these paths must not be relied upon +// manually. +func DefaultStoreDir() (string, error) { + var baseDir string + + // find the base data directory + switch runtime.GOOS { + case "windows": + baseDir = os.Getenv("LocalAppData") + if baseDir == "" { + return "", errors.New("%LocalAppData% is not defined") + } + case "darwin", "ios": + homeDir := os.Getenv("HOME") + if homeDir == "" { + return "", errors.New("$HOME is not defined") + } + baseDir = filepath.Join(homeDir, "Library/Application Support") + default: + baseDir = os.Getenv("XDG_DATA_HOME") + if baseDir == "" { + homeDir := os.Getenv("HOME") + if homeDir == "" { + return "", errors.New("neither $XDG_DATA_HOME nor $HOME are defined") + } + baseDir = filepath.Join(homeDir, ".local/share") + } + } + + // append our program-specific dir to it (OSX has a slightly different + // convention so try to follow that). 
+ switch runtime.GOOS { + case "darwin", "ios": + return filepath.Join(baseDir, "io.kubebuilder.envtest"), nil + default: + return filepath.Join(baseDir, "kubebuilder-envtest"), nil + } +} diff --git a/tools/setup-envtest/store/store.go b/tools/setup-envtest/store/store.go new file mode 100644 index 0000000000..bb5a1f7bcd --- /dev/null +++ b/tools/setup-envtest/store/store.go @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package store + +import ( + "archive/tar" + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sort" + + "github.com/go-logr/logr" + "github.com/spf13/afero" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +// TODO(directxman12): error messages don't show full path, which is gonna make +// things hard to debug + +// Item is a version-platform pair. +type Item struct { + Version versions.Concrete + Platform versions.Platform +} + +// dirName returns the directory name in the store for this item. +func (i Item) dirName() string { + return i.Platform.BaseName(i.Version) +} +func (i Item) String() string { + return fmt.Sprintf("%s (%s)", i.Version, i.Platform) +} + +// Filter is a version spec & platform selector (i.e. platform +// potentially with wildcards) to filter store items. +type Filter struct { + Version versions.Spec + Platform versions.Platform +} + +// Matches checks if this filter matches the given item. +func (f Filter) Matches(item Item) bool { + return f.Version.Matches(item.Version) && f.Platform.Matches(item.Platform) +} + +// Store knows how to list, load, store, and delete envtest tools. +type Store struct { + // Root is the root FS that the store stores in. You'll probably + // want to use a BasePathFS to scope it down to a particular directory. + // + // Note that if for some reason there are nested BasePathFSes, and they're + // interrupted by a non-BasePathFS, Path won't work properly. 
+ Root afero.Fs +} + +// NewAt creates a new store on disk at the given path. +func NewAt(path string) *Store { + return &Store{ + Root: afero.NewBasePathFs(afero.NewOsFs(), path), + } +} + +// Initialize ensures that the store is all set up on disk, etc. +func (s *Store) Initialize(ctx context.Context) error { + log, err := logr.FromContext(ctx) + if err != nil { + return err + } + + log.V(1).Info("ensuring base binaries dir exists") + if err := s.unpackedBase().MkdirAll("", 0755); err != nil { + return fmt.Errorf("unable to make sure base binaries dir exists: %w", err) + } + return nil +} + +// Has checks if an item exists in the store. +func (s *Store) Has(item Item) (bool, error) { + path := s.unpackedPath(item.dirName()) + _, err := path.Stat("") + if err != nil && !errors.Is(err, afero.ErrFileNotFound) { + return false, fmt.Errorf("unable to check if version-platform dir exists: %w", err) + } + return err == nil, nil +} + +// List lists all items matching the given filter. +// +// Results are stored by version (newest first), and OS/arch (consistently, +// but no guaranteed ordering). +func (s *Store) List(ctx context.Context, matching Filter) ([]Item, error) { + var res []Item + if err := s.eachItem(ctx, matching, func(_ string, item Item) { + res = append(res, item) + }); err != nil { + return nil, fmt.Errorf("unable to list version-platform pairs in store: %w", err) + } + + sort.Slice(res, func(i, j int) bool { + if !res[i].Version.Matches(res[j].Version) { + return res[i].Version.NewerThan(res[j].Version) + } + return orderPlatforms(res[i].Platform, res[j].Platform) + }) + + return res, nil +} + +// Add adds this item to the store, with the given contents (a .tar.gz file). 
+func (s *Store) Add(ctx context.Context, item Item, contents io.Reader) (resErr error) { + log, err := logr.FromContext(ctx) + if err != nil { + return err + } + + itemName := item.dirName() + log = log.WithValues("version-platform", itemName) + itemPath := s.unpackedPath(itemName) + + // make sure to clean up if we hit an error + defer func() { + if resErr != nil { + // intentionally ignore this because we can't really do anything + err := s.removeItem(itemPath) + if err != nil { + log.Error(err, "unable to clean up partially added version-platform pair after error") + } + } + }() + + log.V(1).Info("ensuring version-platform binaries dir exists and is empty & writable") + _, err = itemPath.Stat("") + if err != nil && !errors.Is(err, afero.ErrFileNotFound) { + return fmt.Errorf("unable to ensure version-platform binaries dir %s exists", itemName) + } + if err == nil { // exists + log.V(1).Info("cleaning up old version-platform binaries dir") + if err := s.removeItem(itemPath); err != nil { + return fmt.Errorf("unable to clean up existing version-platform binaries dir %s", itemName) + } + } + if err := itemPath.MkdirAll("", 0755); err != nil { + return fmt.Errorf("unable to make sure entry dir %s exists", itemName) + } + + log.V(1).Info("extracting archive") + gzStream, err := gzip.NewReader(contents) + if err != nil { + return fmt.Errorf("unable to start un-gz-ing entry archive") + } + tarReader := tar.NewReader(gzStream) + + var header *tar.Header + for header, err = tarReader.Next(); err == nil; header, err = tarReader.Next() { + if header.Typeflag != tar.TypeReg { // TODO(directxman12): support symlinks, etc? + log.V(1).Info("skipping non-regular-file entry in archive", "entry", header.Name) + continue + } + // just dump all files to the main path, ignoring the prefixed directory + // paths -- they're redundant. We also ignore bits for the most part (except for X), + // preferring our own scheme. 
+ targetPath := filepath.Base(header.Name) + log.V(1).Info("writing archive file to disk", "archive file", header.Name, "on-disk file", targetPath) + perms := 0555 & header.Mode // make sure we're at most r+x + binOut, err := itemPath.OpenFile(targetPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, os.FileMode(perms)) + if err != nil { + return fmt.Errorf("unable to create file %s from archive to disk for version-platform pair %s", targetPath, itemName) + } + if err := func() error { // IIFE to get the defer properly in a loop + defer binOut.Close() + if _, err := io.Copy(binOut, tarReader); err != nil { + return fmt.Errorf("unable to write file %s from archive to disk for version-platform pair %s", targetPath, itemName) + } + return nil + }(); err != nil { + return err + } + } + if err != nil && !errors.Is(err, io.EOF) { //nolint:govet + return fmt.Errorf("unable to finish un-tar-ing the downloaded archive: %w", err) + } + log.V(1).Info("unpacked archive") + + log.V(1).Info("switching version-platform directory to read-only") + if err := itemPath.Chmod("", 0555); err != nil { + // don't bail, this isn't fatal + log.Error(err, "unable to make version-platform directory read-only") + } + return nil +} + +// Remove removes all items matching the given filter. +// +// It returns a list of the successfully removed items (even in the case +// of an error). 
+func (s *Store) Remove(ctx context.Context, matching Filter) ([]Item, error) { + log, err := logr.FromContext(ctx) + if err != nil { + return nil, err + } + + var removed []Item + var savedErr error + if err := s.eachItem(ctx, matching, func(name string, item Item) { + log.V(1).Info("Removing version-platform pair at path", "version-platform", item, "path", name) + + if err := s.removeItem(s.unpackedPath(name)); err != nil { + log.Error(err, "unable to make existing version-platform dir writable to clean it up", "path", name) + savedErr = fmt.Errorf("unable to remove version-platform pair %s (dir %s): %w", item, name, err) + return // don't mark this as removed in the report + } + removed = append(removed, item) + }); err != nil { + return removed, fmt.Errorf("unable to list version-platform pairs to figure out what to delete: %w", err) + } + if savedErr != nil { + return removed, savedErr + } + return removed, nil +} + +// Path returns an actual path that can be used to access this item. +func (s *Store) Path(item Item) (string, error) { + path := s.unpackedPath(item.dirName()) + // NB(directxman12): we need root's realpath because RealPath only + // looks at its own path, and so thus doesn't prepend the underlying + // root's base path. + // + // Technically, if we're fed something that's double wrapped as root, + // this'll be wrong, but this is basically as much as we can do + return afero.FullBaseFsPath(path.(*afero.BasePathFs), ""), nil +} + +// unpackedBase returns the directory in which item dirs lives. +func (s *Store) unpackedBase() afero.Fs { + return afero.NewBasePathFs(s.Root, "k8s") +} + +// unpackedPath returns the item dir with this name. +func (s *Store) unpackedPath(name string) afero.Fs { + return afero.NewBasePathFs(s.unpackedBase(), name) +} + +// eachItem iterates through the on-disk versions that match our version & platform selector, +// calling the callback for each. 
+func (s *Store) eachItem(ctx context.Context, filter Filter, cb func(name string, item Item)) error { + log, err := logr.FromContext(ctx) + if err != nil { + return err + } + + entries, err := afero.ReadDir(s.unpackedBase(), "") + if err != nil { + return fmt.Errorf("unable to list folders in store's unpacked directory: %w", err) + } + + for _, entry := range entries { + if !entry.IsDir() { + log.V(1).Info("skipping dir entry, not a folder", "entry", entry.Name()) + continue + } + ver, pl := versions.ExtractWithPlatform(versions.VersionPlatformRE, entry.Name()) + if ver == nil { + log.V(1).Info("skipping dir entry, not a version", "entry", entry.Name()) + continue + } + item := Item{Version: *ver, Platform: pl} + + if !filter.Matches(item) { + log.V(1).Info("skipping on disk version, does not match version and platform selectors", "platform", pl, "version", ver, "entry", entry.Name()) + continue + } + + cb(entry.Name(), item) + } + + return nil +} + +// removeItem removes the given item directory from disk. +func (s *Store) removeItem(itemDir afero.Fs) error { + if err := itemDir.Chmod("", 0755); err != nil { + // no point in trying to remove if we can't fix the permissions, bail here + return fmt.Errorf("unable to make version-platform dir writable: %w", err) + } + if err := itemDir.RemoveAll(""); err != nil && !errors.Is(err, afero.ErrFileNotFound) { + return fmt.Errorf("unable to remove version-platform dir: %w", err) + } + return nil +} + +// orderPlatforms orders platforms by OS then arch. +func orderPlatforms(first, second versions.Platform) bool { + // sort by OS, then arch + if first.OS != second.OS { + return first.OS < second.OS + } + return first.Arch < second.Arch +} diff --git a/tools/setup-envtest/store/store_suite_test.go b/tools/setup-envtest/store/store_suite_test.go new file mode 100644 index 0000000000..649c22d545 --- /dev/null +++ b/tools/setup-envtest/store/store_suite_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store_test + +import ( + "context" + "testing" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var testLog logr.Logger + +func zapLogger() logr.Logger { + testOut := zapcore.AddSync(GinkgoWriter) + enc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()) + // bleh setting up logging to the ginkgo writer is annoying + zapLog := zap.New(zapcore.NewCore(enc, testOut, zap.DebugLevel), + zap.ErrorOutput(testOut), zap.Development(), zap.AddStacktrace(zap.WarnLevel)) + return zapr.NewLogger(zapLog) +} + +func logCtx(ctx context.Context) context.Context { + return logr.NewContext(ctx, testLog) +} + +func TestStore(t *testing.T) { + testLog = zapLogger() + RegisterFailHandler(Fail) + RunSpecs(t, "Store Suite") +} diff --git a/tools/setup-envtest/store/store_test.go b/tools/setup-envtest/store/store_test.go new file mode 100644 index 0000000000..575d49dd3b --- /dev/null +++ b/tools/setup-envtest/store/store_test.go @@ -0,0 +1,250 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package store_test + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/rand" + "io" + "io/fs" + "path/filepath" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/spf13/afero" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +const ( + fakeStorePath = "/path/to/the/store" +) + +var _ = Describe("Store", func() { + var st *store.Store + BeforeEach(func() { + fs := afero.NewMemMapFs() + fakeStoreFiles(fs, fakeStorePath) + st = &store.Store{ + Root: afero.NewBasePathFs(fs, fakeStorePath), + } + }) + Describe("initialization", func() { + It("should ensure the repo root exists", func(ctx SpecContext) { + // remove the old dir + Expect(st.Root.RemoveAll("")).To(Succeed(), "should be able to remove the store before trying to initialize") + + Expect(st.Initialize(logCtx(ctx))).To(Succeed(), "initialization should succeed") + Expect(st.Root.Stat("k8s")).NotTo(BeNil(), "store's binary dir should exist") + }) + + It("should be fine if the repo root already exists", func(ctx SpecContext) { + Expect(st.Initialize(logCtx(ctx))).To(Succeed()) + }) + }) + Describe("listing items", func() { + It("should filter results by the given filter, sorted in version order (newest first)", func(ctx SpecContext) { + sel, err := versions.FromExpr("<=1.16") + Expect(err).NotTo(HaveOccurred(), "should be able to construct <=1.16 selector") + Expect(st.List(logCtx(ctx), store.Filter{ + Version: sel, + Platform: versions.Platform{OS: 
"*", Arch: "amd64"}, + })).To(Equal([]store.Item{ + {Version: ver(1, 16, 2), Platform: versions.Platform{OS: "ifonlysingularitywasstillathing", Arch: "amd64"}}, + {Version: ver(1, 16, 1), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 16, 0), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 14, 26), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + })) + }) + It("should skip non-folders in the store", func(ctx SpecContext) { + Expect(afero.WriteFile(st.Root, "k8s/2.3.6-linux-amd128", []byte{0x01}, fs.ModePerm)).To(Succeed(), "should be able to create a non-store file in the store directory") + Expect(st.List(logCtx(ctx), store.Filter{ + Version: versions.AnyVersion, Platform: versions.Platform{OS: "linux", Arch: "amd128"}, + })).To(BeEmpty()) + }) + + It("should skip non-matching names in the store", func(ctx SpecContext) { + Expect(st.Root.Mkdir("k8s/somedir-2.3.6-linux-amd128", fs.ModePerm)).To(Succeed(), "should be able to create a non-store file in the store directory") + Expect(st.List(logCtx(ctx), store.Filter{ + Version: versions.AnyVersion, Platform: versions.Platform{OS: "linux", Arch: "amd128"}, + })).To(BeEmpty()) + }) + }) + + Describe("removing items", func() { + var res []store.Item + BeforeEach(func(ctx SpecContext) { + sel, err := versions.FromExpr("<=1.16") + Expect(err).NotTo(HaveOccurred(), "should be able to construct <=1.16 selector") + res, err = st.Remove(logCtx(ctx), store.Filter{ + Version: sel, + Platform: versions.Platform{OS: "*", Arch: "amd64"}, + }) + Expect(err).NotTo(HaveOccurred(), "should be able to remove <=1.16 & */amd64") + }) + It("should return all items removed", func() { + Expect(res).To(ConsistOf( + store.Item{Version: ver(1, 16, 2), Platform: versions.Platform{OS: "ifonlysingularitywasstillathing", Arch: "amd64"}}, + store.Item{Version: ver(1, 16, 1), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + store.Item{Version: ver(1, 16, 0), Platform: 
versions.Platform{OS: "linux", Arch: "amd64"}}, + store.Item{Version: ver(1, 14, 26), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + )) + }) + It("should remove all items matching the given filter from disk", func() { + Expect(afero.ReadDir(st.Root, "k8s")).NotTo(ContainElements( + WithTransform(fs.FileInfo.Name, Equal("1.16.2-ifonlysingularitywasstillathing-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.16.1-linux-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.16.0-linux-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.14.26-linux-amd64")), + )) + }) + + It("should leave items that don't match in place", func() { + Expect(afero.ReadDir(st.Root, "k8s")).To(ContainElements( + WithTransform(fs.FileInfo.Name, Equal("1.17.9-linux-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.16.2-linux-yourimagination")), + WithTransform(fs.FileInfo.Name, Equal("1.14.26-hyperwarp-pixiedust")), + )) + }) + }) + + Describe("adding items (controller-tools archives)", func() { + archiveName := "envtest-v1.16.3-linux-amd64.tar.gz" + + It("should support .tar.gz input", func(ctx SpecContext) { + Expect(st.Add(logCtx(ctx), newItem, makeFakeArchive(archiveName, "controller-tools/envtest/"))).To(Succeed()) + Expect(st.Has(newItem)).To(BeTrue(), "should have the item after adding it") + }) + + It("should extract binaries from the given archive to a directly to the item's directory, regardless of path", func(ctx SpecContext) { + Expect(st.Add(logCtx(ctx), newItem, makeFakeArchive(archiveName, "controller-tools/envtest/"))).To(Succeed()) + + dirName := newItem.Platform.BaseName(newItem.Version) + Expect(afero.ReadFile(st.Root, filepath.Join("k8s", dirName, "some-file"))).To(HavePrefix(archiveName + "some-file")) + Expect(afero.ReadFile(st.Root, filepath.Join("k8s", dirName, "other-file"))).To(HavePrefix(archiveName + "other-file")) + }) + + It("should clean up any existing item directory before creating the new one", func(ctx SpecContext) { + item := 
localVersions[0] + Expect(st.Add(logCtx(ctx), item, makeFakeArchive(archiveName, "controller-tools/envtest/"))).To(Succeed()) + Expect(st.Root.Stat(filepath.Join("k8s", item.Platform.BaseName(item.Version)))).NotTo(BeNil(), "new files should exist") + }) + It("should clean up if it errors before finishing", func(ctx SpecContext) { + item := localVersions[0] + Expect(st.Add(logCtx(ctx), item, new(bytes.Buffer))).NotTo(Succeed(), "should fail to extract") + _, err := st.Root.Stat(filepath.Join("k8s", item.Platform.BaseName(item.Version))) + Expect(err).To(HaveOccurred(), "the binaries dir for the item should be gone") + + }) + }) + + Describe("checking if items are present", func() { + It("should report that present directories are present", func() { + Expect(st.Has(localVersions[0])).To(BeTrue()) + }) + + It("should report that absent directories are absent", func() { + Expect(st.Has(newItem)).To(BeFalse()) + }) + }) + + Describe("getting the path", func() { + It("should return the absolute on-disk path of the given item", func() { + item := localVersions[0] + Expect(st.Path(item)).To(Equal(filepath.Join(fakeStorePath, "k8s", item.Platform.BaseName(item.Version)))) + }) + }) +}) + +var ( + // keep this sorted. 
+ localVersions = []store.Item{ + {Version: ver(1, 17, 9), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 16, 2), Platform: versions.Platform{OS: "linux", Arch: "yourimagination"}}, + {Version: ver(1, 16, 2), Platform: versions.Platform{OS: "ifonlysingularitywasstillathing", Arch: "amd64"}}, + {Version: ver(1, 16, 1), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 16, 0), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 14, 26), Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Version: ver(1, 14, 26), Platform: versions.Platform{OS: "hyperwarp", Arch: "pixiedust"}}, + } + + newItem = store.Item{ + Version: ver(1, 16, 3), + Platform: versions.Platform{OS: "linux", Arch: "amd64"}, + } +) + +func ver(major, minor, patch int) versions.Concrete { + return versions.Concrete{ + Major: major, + Minor: minor, + Patch: patch, + } +} + +func makeFakeArchive(magic, relativePath string) io.Reader { + out := new(bytes.Buffer) + gzipWriter := gzip.NewWriter(out) + tarWriter := tar.NewWriter(gzipWriter) + Expect(tarWriter.WriteHeader(&tar.Header{ + Typeflag: tar.TypeDir, + Name: relativePath, // so we can ensure we skip non-files + Mode: 0777, + })).To(Succeed()) + for _, fileName := range []string{"some-file", "other-file"} { + // create fake file contents: magic+fileName+randomBytes() + var chunk [1024 * 48]byte // 1.5 times our chunk read size in GetVersion + copy(chunk[:], magic) + copy(chunk[len(magic):], fileName) + start := len(magic) + len(fileName) + if _, err := rand.Read(chunk[start:]); err != nil { + panic(err) + } + + // write to relativePath/fileName + err := tarWriter.WriteHeader(&tar.Header{ + Name: relativePath + fileName, + Size: int64(len(chunk[:])), + Mode: 0777, // so we can check that we fix this later + }) + if err != nil { + panic(err) + } + _, err = tarWriter.Write(chunk[:]) + if err != nil { + panic(err) + } + } + tarWriter.Close() + gzipWriter.Close() + + 
return out
+}
+
+func fakeStoreFiles(fs afero.Fs, dir string) {
+ By("making the unpacked directory")
+ unpackedBase := filepath.Join(dir, "k8s")
+ Expect(fs.Mkdir(unpackedBase, 0755)).To(Succeed())
+
+ By("making some fake (empty) versions")
+ for _, item := range localVersions {
+ Expect(fs.Mkdir(filepath.Join(unpackedBase, item.Platform.BaseName(item.Version)), 0755)).To(Succeed())
+ }
+}
diff --git a/tools/setup-envtest/version/version.go b/tools/setup-envtest/version/version.go
new file mode 100644
index 0000000000..1d148b085d
--- /dev/null
+++ b/tools/setup-envtest/version/version.go
@@ -0,0 +1,21 @@
+package version

+import "runtime/debug"

+// Version to be set using ldflags:
+// -ldflags "-X sigs.k8s.io/controller-runtime/tools/setup-envtest/version.version=v1.0.0"
+// falls back to module information if unset.
+var version = ""

+// Version returns the version of the main module.
+func Version() string {
+ if version != "" {
+ return version
+ }
+ info, ok := debug.ReadBuildInfo()
+ if !ok || info == nil || info.Main.Version == "" {
+ // binary has not been built with module support or doesn't contain a version.
+ return "(unknown)"
+ }
+ return info.Main.Version
+}
diff --git a/tools/setup-envtest/version/version_suite_test.go b/tools/setup-envtest/version/version_suite_test.go
new file mode 100644
index 0000000000..99c623e8d4
--- /dev/null
+++ b/tools/setup-envtest/version/version_suite_test.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0

+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestVersioning(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Test Version Suite") +} diff --git a/tools/setup-envtest/version/version_test.go b/tools/setup-envtest/version/version_test.go new file mode 100644 index 0000000000..4178cac870 --- /dev/null +++ b/tools/setup-envtest/version/version_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2024 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + +See the License for the specific language governing permissions and + +limitations under the License. +*/ + +package version + +import ( + "runtime/debug" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("TestVersion", func() { + + info, ok := debug.ReadBuildInfo() + Expect(ok).To(BeTrue()) + tests := map[string]struct { + version string + expected string + }{ + "empty returns build info": { + version: "", + expected: info.Main.Version, + }, + "set to a value returns it": { + version: "1.2.3", + expected: "1.2.3", + }, + } + for name, tc := range tests { + It("Version set to "+name, func() { + versionBackup := version + defer func() { + version = versionBackup + }() + version = tc.version + result := Version() + Expect(result).To(Equal(tc.expected)) + }) + } +}) diff --git a/tools/setup-envtest/versions/misc_test.go b/tools/setup-envtest/versions/misc_test.go new file mode 100644 index 0000000000..a609f4dc60 --- /dev/null +++ b/tools/setup-envtest/versions/misc_test.go @@ -0,0 +1,143 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versions_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + . 
"sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +var _ = Describe("Concrete", func() { + It("should match the only same version", func() { + ver16 := Concrete{Major: 1, Minor: 16} + ver17 := Concrete{Major: 1, Minor: 17} + Expect(ver16.Matches(ver16)).To(BeTrue(), "should match the same version") + Expect(ver16.Matches(ver17)).To(BeFalse(), "should not match a different version") + }) + It("should serialize as X.Y.Z", func() { + Expect(Concrete{Major: 1, Minor: 16, Patch: 3}.String()).To(Equal("1.16.3")) + }) + Describe("when ordering relative to other versions", func() { + ver1163 := Concrete{Major: 1, Minor: 16, Patch: 3} + Specify("newer patch should be newer", func() { + Expect(ver1163.NewerThan(Concrete{Major: 1, Minor: 16})).To(BeTrue()) + }) + Specify("newer minor should be newer", func() { + Expect(ver1163.NewerThan(Concrete{Major: 1, Minor: 15, Patch: 3})).To(BeTrue()) + }) + Specify("newer major should be newer", func() { + Expect(ver1163.NewerThan(Concrete{Major: 0, Minor: 16, Patch: 3})).To(BeTrue()) + }) + }) +}) + +var _ = Describe("Platform", func() { + Specify("a concrete platform should match exactly itself", func() { + plat1 := Platform{OS: "linux", Arch: "amd64"} + plat2 := Platform{OS: "linux", Arch: "s390x"} + plat3 := Platform{OS: "windows", Arch: "amd64"} + Expect(plat1.Matches(plat1)).To(BeTrue(), "should match itself") + Expect(plat1.Matches(plat2)).To(BeFalse(), "should reject a different arch") + Expect(plat1.Matches(plat3)).To(BeFalse(), "should reject a different os") + }) + Specify("a wildcard arch should match any arch", func() { + sel := Platform{OS: "linux", Arch: "*"} + plat1 := Platform{OS: "linux", Arch: "amd64"} + plat2 := Platform{OS: "linux", Arch: "s390x"} + plat3 := Platform{OS: "windows", Arch: "amd64"} + Expect(sel.Matches(sel)).To(BeTrue(), "should match itself") + Expect(sel.Matches(plat1)).To(BeTrue(), "should match some arch with the same OS") + Expect(sel.Matches(plat2)).To(BeTrue(), "should match 
another arch with the same OS") + Expect(plat1.Matches(plat3)).To(BeFalse(), "should reject a different os") + }) + Specify("a wildcard os should match any os", func() { + sel := Platform{OS: "*", Arch: "amd64"} + plat1 := Platform{OS: "linux", Arch: "amd64"} + plat2 := Platform{OS: "windows", Arch: "amd64"} + plat3 := Platform{OS: "linux", Arch: "s390x"} + Expect(sel.Matches(sel)).To(BeTrue(), "should match itself") + Expect(sel.Matches(plat1)).To(BeTrue(), "should match some os with the same arch") + Expect(sel.Matches(plat2)).To(BeTrue(), "should match another os with the same arch") + Expect(plat1.Matches(plat3)).To(BeFalse(), "should reject a different arch") + }) + It("should report a wildcard OS as a wildcard platform", func() { + Expect(Platform{OS: "*", Arch: "amd64"}.IsWildcard()).To(BeTrue()) + }) + It("should report a wildcard arch as a wildcard platform", func() { + Expect(Platform{OS: "linux", Arch: "*"}.IsWildcard()).To(BeTrue()) + }) + It("should serialize as os/arch", func() { + Expect(Platform{OS: "linux", Arch: "amd64"}.String()).To(Equal("linux/amd64")) + }) + + Specify("knows how to produce a base store name", func() { + plat := Platform{OS: "linux", Arch: "amd64"} + ver := Concrete{Major: 1, Minor: 16, Patch: 3} + Expect(plat.BaseName(ver)).To(Equal("1.16.3-linux-amd64")) + }) + + Specify("knows how to produce an archive name", func() { + plat := Platform{OS: "linux", Arch: "amd64"} + ver := Concrete{Major: 1, Minor: 16, Patch: 3} + Expect(plat.ArchiveName(ver)).To(Equal("envtest-v1.16.3-linux-amd64.tar.gz")) + }) + + Describe("parsing", func() { + Context("for version-platform names", func() { + It("should accept strings of the form x.y.z-os-arch", func() { + ver, plat := ExtractWithPlatform(VersionPlatformRE, "1.16.3-linux-amd64") + Expect(ver).To(Equal(&Concrete{Major: 1, Minor: 16, Patch: 3})) + Expect(plat).To(Equal(Platform{OS: "linux", Arch: "amd64"})) + }) + It("should reject nonsense strings", func() { + ver, _ := 
ExtractWithPlatform(VersionPlatformRE, "1.16-linux-amd64")
+ Expect(ver).To(BeNil())
+ })
+ })
+ Context("for archive names (controller-tools)", func() {
+ It("should accept strings of the form envtest-vx.y.z-os-arch.tar.gz", func() {
+ ver, plat := ExtractWithPlatform(ArchiveRE, "envtest-v1.16.3-linux-amd64.tar.gz")
+ Expect(ver).To(Equal(&Concrete{Major: 1, Minor: 16, Patch: 3}))
+ Expect(plat).To(Equal(Platform{OS: "linux", Arch: "amd64"}))
+ })
+ It("should reject nonsense strings", func() {
+ ver, _ := ExtractWithPlatform(ArchiveRE, "envtest-v1.16.3-linux-amd64.tar.sum")
+ Expect(ver).To(BeNil())
+ })
+ })
+ })
+})

+var _ = Describe("Spec helpers", func() {
+ Specify("can fill a spec with a concrete version", func() {
+ spec := Spec{Selector: AnySelector{}} // don't just use AnyVersion so we don't modify it
+ spec.MakeConcrete(Concrete{Major: 1, Minor: 16})
+ Expect(spec.AsConcrete()).To(Equal(&Concrete{Major: 1, Minor: 16}))
+ })
+ It("should serialize as the underlying selector with ! for check latest", func() {
+ spec, err := FromExpr("1.16.*!")
+ Expect(err).NotTo(HaveOccurred())
+ Expect(spec.String()).To(Equal("1.16.*!"))
+ })
+ It("should serialize as the underlying selector by itself if not check latest", func() {
+ spec, err := FromExpr("1.16.*")
+ Expect(err).NotTo(HaveOccurred())
+ Expect(spec.String()).To(Equal("1.16.*"))
+ })
+})
diff --git a/tools/setup-envtest/versions/parse.go b/tools/setup-envtest/versions/parse.go
new file mode 100644
index 0000000000..cd25710b2b
--- /dev/null
+++ b/tools/setup-envtest/versions/parse.go
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright 2021 The Kubernetes Authors

+package versions

+import (
+ "fmt"
+ "regexp"
+ "strconv"
+)

+var (
+ // baseVersionRE is a semver-ish version -- either X.Y.Z, X.Y, or X.Y.{*|x}.
+ baseVersionRE = `(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)(?:\.(?P<patch>0|[1-9]\d*|x|\*))?`
+ // versionExprRe matches valid version input for FromExpr. 
+ versionExprRE = regexp.MustCompile(`^(?P<sel><|~|<=)?` + baseVersionRE + `(?P<latest>!)?$`)

+ // ConcreteVersionRE matches a concrete version anywhere in the string.
+ ConcreteVersionRE = regexp.MustCompile(`(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)`)
+)

+// FromExpr extracts a version from a string in the form of a semver version,
+// where X, Y, and Z may also be wildcards ('*', 'x'),
+// and pre-release names & numbers may also be wildcards. The prerelease section is slightly
+// restricted to match what k8s does.
+// The whole string is a version selector as follows:
+// - X.Y.Z matches version X.Y.Z where x, y, and z
+// are ints >= 0, and Z may be '*' or 'x'
+// - X.Y is equivalent to X.Y.*
+// - ~X.Y.Z means >= X.Y.Z && < X.Y+1.0
+// - <X.Y.Z means older than X.Y.Z
+// - <=X.Y.Z means X.Y.Z or older
+func FromExpr(expr string) (Spec, error) {
+ match := versionExprRE.FindStringSubmatch(expr)
+ if match == nil {
+ return Spec{}, fmt.Errorf("unable to parse %q as a version string", expr)
+ }
+ verInfo := PatchSelectorFromMatch(match, versionExprRE)
+ latest := match[versionExprRE.SubexpIndex("latest")] == "!"
+ sel := match[versionExprRE.SubexpIndex("sel")]
+ spec := Spec{
+ CheckLatest: latest,
+ }
+ if sel == "" {
+ spec.Selector = verInfo
+ return spec, nil
+ }

+ switch sel {
+ case "<", "<=":
+ spec.Selector = LessThanSelector{
+ PatchSelector: verInfo,
+ OrEquals: sel == "<=",
+ }
+ case "~":
+ // since patch & prerelease are both optional, and both count as zero for >= comparisons, if we use
+ // wildcards with a selector we can just set them to zero.
+ if verInfo.Patch == AnyPoint {
+ verInfo.Patch = PointVersion(0)
+ }
+ baseVer := *verInfo.AsConcrete()
+ spec.Selector = TildeSelector{Concrete: baseVer}
+ default:
+ panic("unreachable: mismatch between FromExpr and its RE in selector")
+ }

+ return spec, nil
+}

+// PointVersionFromValidString extracts a point version
+// from the corresponding string representation, which may
+// be a number >= 0, or x|* (AnyPoint).
+//
+// Anything else will cause a panic (use this on strings
+// extracted from regexes).
+func PointVersionFromValidString(str string) PointVersion {
+ switch str {
+ case "*", "x":
+ return AnyPoint
+ default:
+ ver, err := strconv.Atoi(str)
+ if err != nil {
+ panic(err)
+ }
+ return PointVersion(ver)
+ }
+}

+// PatchSelectorFromMatch constructs a simple selector according to the
+// ParseExpr rules out of pre-validated sections.
+//
+// re must include named captures for major, minor, patch, prenum, and prelabel
+//
+// Any bad input may cause a panic. Use when you got the parts from an RE match. 
+func PatchSelectorFromMatch(match []string, re *regexp.Regexp) PatchSelector { + // already parsed via RE, should be fine to ignore errors unless it's a + // *huge* number + major, err := strconv.Atoi(match[re.SubexpIndex("major")]) + if err != nil { + panic("invalid input passed as patch selector (invalid state)") + } + minor, err := strconv.Atoi(match[re.SubexpIndex("minor")]) + if err != nil { + panic("invalid input passed as patch selector (invalid state)") + } + + // patch is optional, means wildcard if left off + patch := AnyPoint + if patchRaw := match[re.SubexpIndex("patch")]; patchRaw != "" { + patch = PointVersionFromValidString(patchRaw) + } + return PatchSelector{ + Major: major, + Minor: minor, + Patch: patch, + } +} diff --git a/tools/setup-envtest/versions/parse_test.go b/tools/setup-envtest/versions/parse_test.go new file mode 100644 index 0000000000..062fdcc6c8 --- /dev/null +++ b/tools/setup-envtest/versions/parse_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versions_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + . 
"sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +func patchSel(x, y int, z PointVersion) PatchSelector { + return PatchSelector{Major: x, Minor: y, Patch: z} +} + +func patchSpec(x, y int, z PointVersion) Spec { + return Spec{Selector: patchSel(x, y, z)} +} + +func tildeSel(x, y, z int) TildeSelector { + return TildeSelector{ + Concrete: Concrete{ + Major: x, Minor: y, Patch: z, + }, + } +} + +func tildeSpec(x, y, z int) Spec { + return Spec{Selector: tildeSel(x, y, z)} +} +func ltSpec(x, y int, z PointVersion) Spec { + // this just keeps the table a bit shorter + return Spec{Selector: LessThanSelector{ + PatchSelector: patchSel(x, y, z), + }} +} +func lteSpec(x, y int, z PointVersion) Spec { + // this just keeps the table a bit shorter + return Spec{Selector: LessThanSelector{ + PatchSelector: patchSel(x, y, z), + OrEquals: true, + }} +} + +var _ = Describe("Parse", func() { + DescribeTable("it should support", + func(spec string, expected Spec) { + Expect(FromExpr(spec)).To(Equal(expected)) + }, + Entry("X.Y versions", "1.16", patchSpec(1, 16, AnyPoint)), + Entry("X.Y.Z versions", "1.16.3", patchSpec(1, 16, PointVersion(3))), + Entry("X.Y.x wildcard", "1.16.x", patchSpec(1, 16, AnyPoint)), + Entry("X.Y.* wildcard", "1.16.*", patchSpec(1, 16, AnyPoint)), + + Entry("~X.Y selector", "~1.16", tildeSpec(1, 16, 0)), + Entry("~X.Y.Z selector", "~1.16.3", tildeSpec(1, 16, 3)), + Entry("~X.Y.x selector", "~1.16.x", tildeSpec(1, 16, 0)), + Entry("~X.Y.* selector", "~1.16.*", tildeSpec(1, 16, 0)), + + Entry("\w+)-(?P\w+)` + // VersionPlatformRE matches concrete version-platform strings. + VersionPlatformRE = regexp.MustCompile(`^` + versionPlatformREBase + `$`) + // ArchiveRE matches concrete version-platform.tar.gz strings. + // The archives published to GitHub releases by controller-tools use the "envtest-v" prefix (e.g. "envtest-v1.30.0-darwin-amd64.tar.gz"). 
+ ArchiveRE = regexp.MustCompile(`^envtest-v` + versionPlatformREBase + `\.tar\.gz$`) +) diff --git a/tools/setup-envtest/versions/selectors_test.go b/tools/setup-envtest/versions/selectors_test.go new file mode 100644 index 0000000000..8357d41c80 --- /dev/null +++ b/tools/setup-envtest/versions/selectors_test.go @@ -0,0 +1,216 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versions_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + . 
"sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +var _ = Describe("Selectors", func() { + Describe("patch", func() { + var sel Selector + Context("with any patch", func() { + BeforeEach(func() { + var err error + sel, err = FromExpr("1.16.*") + Expect(err).NotTo(HaveOccurred()) + }) + + It("should match any patch version with the same major & minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeTrue(), "should match 1.16.3") + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 0})).To(BeTrue(), "should match 1.16.0") + }) + + It("should reject a different major", func() { + Expect(sel.Matches(Concrete{Major: 2, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 2.16.3") + + }) + + It("should reject a different minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 3})).To(BeFalse(), "should reject 1.17.3") + }) + + It("should serialize as X.Y.*", func() { + Expect(sel.String()).To(Equal("1.16.*")) + }) + + It("should not be concrete", func() { + Expect(sel.AsConcrete()).To(BeNil()) + }) + }) + + Context("with a specific patch", func() { + BeforeEach(func() { + var err error + sel, err = FromExpr("1.16.3") + Expect(err).NotTo(HaveOccurred()) + }) + It("should match exactly the major/minor/patch", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeTrue(), "should match 1.16.3") + }) + + It("should reject a different major", func() { + Expect(sel.Matches(Concrete{Major: 2, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 2.16.3") + + }) + + It("should reject a different minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 3})).To(BeFalse(), "should reject 1.17.3") + + }) + + It("should reject a different patch", func() { + + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 4})).To(BeFalse(), "should reject 1.16.4") + }) + It("should serialize as X.Y.Z", func() { + Expect(sel.String()).To(Equal("1.16.3")) + }) + It("may be concrete", 
func() { + Expect(sel.AsConcrete()).To(Equal(&Concrete{Major: 1, Minor: 16, Patch: 3})) + }) + }) + + }) + + Describe("tilde", func() { + var sel Selector + BeforeEach(func() { + var err error + sel, err = FromExpr("~1.16.3") + Expect(err).NotTo(HaveOccurred()) + }) + It("should match exactly the major/minor/patch", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeTrue(), "should match 1.16.3") + }) + + It("should match a patch greater than the given one, with the same major/minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 4})).To(BeTrue(), "should match 1.16.4") + }) + + It("should reject a patch less than the given one, with the same major/minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 2})).To(BeFalse(), "should reject 1.16.2") + + }) + + It("should reject a different major", func() { + Expect(sel.Matches(Concrete{Major: 2, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 2.16.3") + + }) + + It("should reject a different minor", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 3})).To(BeFalse(), "should reject 1.17.3") + + }) + + It("should treat ~X.Y.* as ~X.Y.Z", func() { + sel, err := FromExpr("~1.16.*") + Expect(err).NotTo(HaveOccurred()) + + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 0})).To(BeTrue(), "should match 1.16.0") + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeTrue(), "should match 1.16.3") + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 0})).To(BeFalse(), "should reject 1.17.0") + }) + It("should serialize as ~X.Y.Z", func() { + Expect(sel.String()).To(Equal("~1.16.3")) + }) + It("should never be concrete", func() { + Expect(sel.AsConcrete()).To(BeNil()) + }) + }) + + Describe("less-than", func() { + var sel Selector + BeforeEach(func() { + var err error + sel, err = FromExpr("<1.16.3") + Expect(err).NotTo(HaveOccurred()) + }) + It("should reject the exact major/minor/patch", func() { + 
Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 1.16.3") + + }) + It("should reject greater patches", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 4})).To(BeFalse(), "should reject 1.16.4") + + }) + It("should reject greater majors", func() { + Expect(sel.Matches(Concrete{Major: 2, Minor: 16, Patch: 3})).To(BeFalse(), "should reject 2.16.3") + + }) + It("should reject greater minors", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 17, Patch: 3})).To(BeFalse(), "should reject 1.17.3") + + }) + + It("should accept lesser patches", func() { + + Expect(sel.Matches(Concrete{Major: 1, Minor: 16, Patch: 2})).To(BeTrue(), "should accept 1.16.2") + }) + It("should accept lesser majors", func() { + Expect(sel.Matches(Concrete{Major: 0, Minor: 16, Patch: 3})).To(BeTrue(), "should accept 0.16.3") + + }) + It("should accept lesser minors", func() { + Expect(sel.Matches(Concrete{Major: 1, Minor: 15, Patch: 3})).To(BeTrue(), "should accept 1.15.3") + + }) + It("should serialize as other.Major + } + if c.Minor != other.Minor { + return c.Minor > other.Minor + } + return c.Patch > other.Patch +} + +// Matches checks if this version is equal to the other one. +func (c Concrete) Matches(other Concrete) bool { + return c == other +} + +func (c Concrete) String() string { + return fmt.Sprintf("%d.%d.%d", c.Major, c.Minor, c.Patch) +} + +// PatchSelector selects a set of versions where the patch is a wildcard. +type PatchSelector struct { + Major, Minor int + Patch PointVersion +} + +func (s PatchSelector) String() string { + return fmt.Sprintf("%d.%d.%s", s.Major, s.Minor, s.Patch) +} + +// Matches checks if the given version matches this selector. +func (s PatchSelector) Matches(ver Concrete) bool { + return s.Major == ver.Major && s.Minor == ver.Minor && s.Patch.Matches(ver.Patch) +} + +// AsConcrete returns nil if there are wildcards in this selector, +// and the concrete version that this selects otherwise. 
+func (s PatchSelector) AsConcrete() *Concrete { + if s.Patch == AnyPoint { + return nil + } + + return &Concrete{ + Major: s.Major, + Minor: s.Minor, + Patch: int(s.Patch), // safe to cast, we've just checked wildcards above + } +} + +// TildeSelector selects [X.Y.Z, X.Y+1.0). +type TildeSelector struct { + Concrete +} + +// Matches checks if the given version matches this selector. +func (s TildeSelector) Matches(ver Concrete) bool { + if s.Concrete.Matches(ver) { + // easy, "exact" match + return true + } + return ver.Major == s.Major && ver.Minor == s.Minor && ver.Patch >= s.Patch +} +func (s TildeSelector) String() string { + return "~" + s.Concrete.String() +} + +// AsConcrete returns nil (this is never a concrete version). +func (s TildeSelector) AsConcrete() *Concrete { + return nil +} + +// LessThanSelector selects versions older than the given one +// (mainly useful for cleaning up). +type LessThanSelector struct { + PatchSelector + OrEquals bool +} + +// Matches checks if the given version matches this selector. +func (s LessThanSelector) Matches(ver Concrete) bool { + if s.Major != ver.Major { + return s.Major > ver.Major + } + if s.Minor != ver.Minor { + return s.Minor > ver.Minor + } + if !s.Patch.Matches(ver.Patch) { + // matches rules out a wildcard, so it's fine to compare as normal numbers + return int(s.Patch) > ver.Patch + } + return s.OrEquals +} +func (s LessThanSelector) String() string { + if s.OrEquals { + return "<=" + s.PatchSelector.String() + } + return "<" + s.PatchSelector.String() +} + +// AsConcrete returns nil (this is never a concrete version). +func (s LessThanSelector) AsConcrete() *Concrete { + return nil +} + +// AnySelector matches any version at all. +type AnySelector struct{} + +// Matches checks if the given version matches this selector. +func (AnySelector) Matches(_ Concrete) bool { return true } + +// AsConcrete returns nil (this is never a concrete version). 
+func (AnySelector) AsConcrete() *Concrete { return nil } +func (AnySelector) String() string { return "*" } + +// Selector selects some concrete version or range of versions. +type Selector interface { + // AsConcrete tries to return this selector as a concrete version. + // If the selector would only match a single version, it'll return + // that, otherwise it'll return nil. + AsConcrete() *Concrete + // Matches checks if this selector matches the given concrete version. + Matches(ver Concrete) bool + String() string +} + +// Spec matches some version or range of versions, and tells us how to deal with local and +// remote when selecting a version. +type Spec struct { + Selector + + // CheckLatest tells us to check the remote server for the latest + // version that matches our selector, instead of just relying on + // matching local versions. + CheckLatest bool +} + +// MakeConcrete replaces the contents of this spec with one that +// matches the given concrete version (without checking latest +// from the server). +func (s *Spec) MakeConcrete(ver Concrete) { + s.Selector = ver + s.CheckLatest = false +} + +// AsConcrete returns the underlying selector as a concrete version, if +// possible. +func (s Spec) AsConcrete() *Concrete { + return s.Selector.AsConcrete() +} + +// Matches checks if the underlying selector matches the given version. +func (s Spec) Matches(ver Concrete) bool { + return s.Selector.Matches(ver) +} + +func (s Spec) String() string { + res := s.Selector.String() + if s.CheckLatest { + res += "!" + } + return res +} + +// PointVersion represents a wildcard (patch) version +// or concrete number. +type PointVersion int + +const ( + // AnyPoint matches any point version. + AnyPoint PointVersion = -1 +) + +// Matches checks if a point version is compatible +// with a concrete point version. +// Two point versions are compatible if they are +// a) both concrete +// b) one is a wildcard. 
+func (p PointVersion) Matches(other int) bool { + switch p { + case AnyPoint: + return true + default: + return int(p) == other + } +} +func (p PointVersion) String() string { + switch p { + case AnyPoint: + return "*" + default: + return strconv.Itoa(int(p)) + } +} + +var ( + // LatestVersion matches the most recent version on the remote server. + LatestVersion = Spec{ + Selector: AnySelector{}, + CheckLatest: true, + } + // AnyVersion matches any local or remote version. + AnyVersion = Spec{ + Selector: AnySelector{}, + } +) diff --git a/tools/setup-envtest/versions/versions_suite_test.go b/tools/setup-envtest/versions/versions_suite_test.go new file mode 100644 index 0000000000..db1fe76403 --- /dev/null +++ b/tools/setup-envtest/versions/versions_suite_test.go @@ -0,0 +1,29 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versions_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestVersions(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Versions Suite") +} diff --git a/tools/setup-envtest/workflows/workflows.go b/tools/setup-envtest/workflows/workflows.go new file mode 100644 index 0000000000..fb9123d269 --- /dev/null +++ b/tools/setup-envtest/workflows/workflows.go @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package workflows + +import ( + "context" + "fmt" + "io" + + "github.com/go-logr/logr" + + envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/version" +) + +// Use is a workflow that prints out information about stored +// version-platform pairs, downloading them if necessary & requested. +type Use struct { + UseEnv bool + AssetsPath string + PrintFormat envp.PrintFormat +} + +// Do executes this workflow. +func (f Use) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("use")) + env.EnsureBaseDirs(ctx) + if f.UseEnv { + // the env var unconditionally + if env.PathMatches(f.AssetsPath) { + env.PrintInfo(f.PrintFormat) + return + } + } + env.EnsureVersionIsSet(ctx) + if env.ExistsAndValid() { + env.PrintInfo(f.PrintFormat) + return + } + if env.NoDownload { + envp.Exit(2, "no such version (%s) exists on disk for this architecture (%s) -- try running `list -i` to see what's on disk", env.Version, env.Platform) + } + env.Fetch(ctx) + env.PrintInfo(f.PrintFormat) +} + +// List is a workflow that lists version-platform pairs in the store +// and on the remote server that match the given filter. +type List struct{} + +// Do executes this workflow. +func (List) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("list")) + env.EnsureBaseDirs(ctx) + env.ListVersions(ctx) +} + +// Cleanup is a workflow that removes version-platform pairs from the store +// that match the given filter. 
+type Cleanup struct{} + +// Do executes this workflow. +func (Cleanup) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("cleanup")) + + env.NoDownload = true + env.ForceDownload = false + + env.EnsureBaseDirs(ctx) + env.Remove(ctx) +} + +// Sideload is a workflow that adds or replaces a version-platform pair in the +// store, using the given archive as the files. +type Sideload struct { + Input io.Reader + PrintFormat envp.PrintFormat +} + +// Do executes this workflow. +func (f Sideload) Do(env *envp.Env) { + ctx := logr.NewContext(context.TODO(), env.Log.WithName("sideload")) + + env.EnsureBaseDirs(ctx) + env.NoDownload = true + env.Sideload(ctx, f.Input) + env.PrintInfo(f.PrintFormat) +} + +// Version is the workflow that shows the current binary version +// of setup-envtest. +type Version struct{} + +// Do executes the workflow. +func (v Version) Do(env *envp.Env) { + fmt.Fprintf(env.Out, "setup-envtest version: %s\n", version.Version()) +} diff --git a/tools/setup-envtest/workflows/workflows_suite_test.go b/tools/setup-envtest/workflows/workflows_suite_test.go new file mode 100644 index 0000000000..1b487622bd --- /dev/null +++ b/tools/setup-envtest/workflows/workflows_suite_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflows_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var testLog logr.Logger + +func zapLogger() logr.Logger { + testOut := zapcore.AddSync(GinkgoWriter) + enc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()) + // bleh setting up logging to the ginkgo writer is annoying + zapLog := zap.New(zapcore.NewCore(enc, testOut, zap.DebugLevel), + zap.ErrorOutput(testOut), zap.Development(), zap.AddStacktrace(zap.WarnLevel)) + return zapr.NewLogger(zapLog) +} + +func TestWorkflows(t *testing.T) { + testLog = zapLogger() + RegisterFailHandler(Fail) + RunSpecs(t, "Workflows Suite") +} diff --git a/tools/setup-envtest/workflows/workflows_test.go b/tools/setup-envtest/workflows/workflows_test.go new file mode 100644 index 0000000000..435ae24285 --- /dev/null +++ b/tools/setup-envtest/workflows/workflows_test.go @@ -0,0 +1,459 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package workflows_test + +import ( + "bytes" + "fmt" + "io/fs" + "path/filepath" + "runtime/debug" + "sort" + "strings" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/ghttp" + "github.com/spf13/afero" + "k8s.io/apimachinery/pkg/util/sets" + envp "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/store" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" + wf "sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows" +) + +func ver(major, minor, patch int) versions.Concrete { + return versions.Concrete{ + Major: major, + Minor: minor, + Patch: patch, + } +} + +func shouldHaveError() { + var err error + var code int + if cause := recover(); envp.CheckRecover(cause, func(caughtCode int, caughtErr error) { + err = caughtErr + code = caughtCode + }) { + panic(cause) + } + Expect(err).To(HaveOccurred(), "should write an error") + Expect(code).NotTo(BeZero(), "should exit with a non-zero code") +} + +const ( + testStorePath = ".teststore" +) + +var _ = Describe("Workflows", func() { + var ( + env *envp.Env + out *bytes.Buffer + server *ghttp.Server + remoteHTTPItems itemsHTTP + ) + BeforeEach(func() { + out = new(bytes.Buffer) + baseFs := afero.Afero{Fs: afero.NewMemMapFs()} + + server = ghttp.NewServer() + + client := &remote.HTTPClient{ + Log: testLog.WithName("http-client"), + IndexURL: fmt.Sprintf("http://%s/%s", server.Addr(), "envtest-releases.yaml"), + } + + env = &envp.Env{ + Log: testLog, + VerifySum: true, // on by default + FS: baseFs, + Store: &store.Store{Root: afero.NewBasePathFs(baseFs, testStorePath)}, + Out: out, + Platform: versions.PlatformItem{ // default + Platform: versions.Platform{ + OS: "linux", + Arch: "amd64", + }, + }, + Client: client, + } + + fakeStore(env.FS, testStorePath) + remoteHTTPItems = remoteVersionsHTTP + }) + JustBeforeEach(func() { + handleRemoteVersionsHTTP(server, remoteHTTPItems) + }) + AfterEach(func() { + server.Close() + server = nil + }) + + Describe("use", 
func() { + var flow wf.Use + BeforeEach(func() { + // some defaults for most tests + env.Version = versions.Spec{ + Selector: ver(1, 16, 0), + } + flow = wf.Use{ + PrintFormat: envp.PrintPath, + } + }) + + It("should initialize the store if it doesn't exist", func() { + Expect(env.FS.RemoveAll(testStorePath)).To(Succeed()) + // need to set this to a valid remote version cause our store is now empty + env.Version = versions.Spec{Selector: ver(1, 16, 4)} + flow.Do(env) + Expect(env.FS.Stat(testStorePath)).NotTo(BeNil()) + }) + + Context("when use env is set", func() { + BeforeEach(func() { + flow.UseEnv = true + }) + It("should fall back to normal behavior when the env is not set", func() { + flow.Do(env) + Expect(out.String()).To(HaveSuffix("/1.16.0-linux-amd64"), "should fall back to a local version") + }) + It("should fall back to normal behavior if binaries are missing", func() { + flow.AssetsPath = ".teststore/missing-binaries" + flow.Do(env) + Expect(out.String()).To(HaveSuffix("/1.16.0-linux-amd64"), "should fall back to a local version") + }) + It("should use the value of the env if it contains the right binaries", func() { + flow.AssetsPath = ".teststore/good-version" + flow.Do(env) + Expect(out.String()).To(Equal(flow.AssetsPath)) + }) + It("should not try and check the version of the binaries", func() { + flow.AssetsPath = ".teststore/wrong-version" + flow.Do(env) + Expect(out.String()).To(Equal(flow.AssetsPath)) + }) + It("should not need to contact the network", func() { + server.Close() + flow.AssetsPath = ".teststore/good-version" + flow.Do(env) + // expect to not get a panic -- if we do, it'll cause the test to fail + }) + }) + + Context("when downloads are disabled", func() { + BeforeEach(func() { + env.NoDownload = true + server.Close() + }) + + // It("should not contact the network") is a gimme here, because we + // call server.Close() above. 
+ + It("should error if no matches are found locally", func() { + defer shouldHaveError() + env.Version.Selector = versions.Concrete{Major: 9001} + flow.Do(env) + }) + It("should settle for the latest local match if latest is requested", func() { + env.Version = versions.Spec{ + CheckLatest: true, + Selector: versions.PatchSelector{ + Major: 1, + Minor: 16, + Patch: versions.AnyPoint, + }, + } + + flow.Do(env) + + // latest on "server" is 1.16.4, shouldn't use that + Expect(out.String()).To(HaveSuffix("/1.16.1-linux-amd64"), "should use the latest local version") + }) + }) + + Context("if latest is requested", func() { + It("should contact the network to see if there's anything newer", func() { + env.Version = versions.Spec{ + CheckLatest: true, + Selector: versions.PatchSelector{ + Major: 1, Minor: 16, Patch: versions.AnyPoint, + }, + } + flow.Do(env) + Expect(out.String()).To(HaveSuffix("/1.16.4-linux-amd64"), "should use the latest remote version") + }) + It("should still use the latest local if the network doesn't have anything newer", func() { + env.Version = versions.Spec{ + CheckLatest: true, + Selector: versions.PatchSelector{ + Major: 1, Minor: 14, Patch: versions.AnyPoint, + }, + } + + flow.Do(env) + + // latest on the server is 1.14.1, latest local is 1.14.26 + Expect(out.String()).To(HaveSuffix("/1.14.26-linux-amd64"), "should use the latest local version") + }) + }) + + It("should check local for a match first", func() { + server.Close() // confirm no network + env.Version = versions.Spec{ + Selector: versions.TildeSelector{Concrete: ver(1, 16, 0)}, + } + flow.Do(env) + // latest on the server is 1.16.4, latest local is 1.16.1 + Expect(out.String()).To(HaveSuffix("/1.16.1-linux-amd64"), "should use the latest local version") + }) + + It("should fall back to the network if no local matches are found", func() { + env.Version = versions.Spec{ + Selector: versions.TildeSelector{Concrete: ver(1, 19, 0)}, + } + flow.Do(env) + 
Expect(out.String()).To(HaveSuffix("/1.19.2-linux-amd64"), "should have a remote version") + }) + + It("should error out if no matches can be found anywhere", func() { + defer shouldHaveError() + env.Version = versions.Spec{ + Selector: versions.TildeSelector{Concrete: ver(0, 0, 1)}, + } + flow.Do(env) + }) + + It("should skip local versions matches with non-matching platforms", func() { + env.NoDownload = true // so we get an error + defer shouldHaveError() + env.Version = versions.Spec{ + // has non-matching local versions + Selector: ver(1, 13, 0), + } + + flow.Do(env) + }) + + It("should skip remote version matches with non-matching platforms", func() { + defer shouldHaveError() + env.Version = versions.Spec{ + // has a non-matching remote version + Selector: versions.TildeSelector{Concrete: ver(1, 11, 1)}, + } + flow.Do(env) + }) + + Describe("verifying the checksum", func() { + BeforeEach(func() { + // Recreate remoteHTTPItems to not impact other tests. + remoteHTTPItems = makeContentsHTTP(remoteNamesHTTP) + remoteHTTPItems.index.Releases["v86.75.309"] = map[string]remote.Archive{ + "envtest-v86.75.309-linux-amd64.tar.gz": { + SelfLink: "not used in this test", + Hash: "nottherightone!", + }, + } + // need a valid tar.gz file to not error from that + remoteHTTPItems.contents["envtest-v86.75.309-linux-amd64.tar.gz"] = remoteHTTPItems.contents["envtest-v1.10-darwin-amd64.tar.gz"] + + env.Version = versions.Spec{ + Selector: ver(86, 75, 309), + } + }) + Specify("when enabled, should fail if the downloaded hash doesn't match", func() { + defer shouldHaveError() + flow.Do(env) + }) + Specify("when disabled, shouldn't check the checksum at all", func() { + env.VerifySum = false + flow.Do(env) + }) + }) + }) + + Describe("list", func() { + // split by fields so we're not matching on whitespace + listFields := func() [][]string { + resLines := strings.Split(strings.TrimSpace(out.String()), "\n") + resFields := make([][]string, len(resLines)) + for i, line := range 
resLines { + resFields[i] = strings.Fields(line) + } + return resFields + } + + Context("when downloads are disabled", func() { + BeforeEach(func() { + server.Close() // ensure no network + env.NoDownload = true + }) + It("should include local contents sorted by version", func() { + env.Version = versions.AnyVersion + env.Platform.Platform = versions.Platform{OS: "*", Arch: "*"} + wf.List{}.Do(env) + + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.17.9", "linux/amd64"}, + {"(installed)", "v1.16.2", "ifonlysingularitywasstillathing/amd64"}, + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + {"(installed)", "v1.14.26", "hyperwarp/pixiedust"}, + {"(installed)", "v1.14.26", "linux/amd64"}, + })) + }) + It("should skip non-matching local contents", func() { + env.Version.Selector = versions.PatchSelector{ + Major: 1, Minor: 16, Patch: versions.AnyPoint, + } + env.Platform.Arch = "*" + wf.List{}.Do(env) + + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + })) + }) + }) + Context("when downloads are enabled", func() { + Context("when sorting", func() { + BeforeEach(func() { + // Recreate remoteHTTPItems to not impact other tests. + remoteHTTPItems = makeContentsHTTP(remoteNamesHTTP) + // Also only keep the first 7 items. + // Get the first 7 archive names + var archiveNames []string + for _, release := range remoteHTTPItems.index.Releases { + for archiveName := range release { + archiveNames = append(archiveNames, archiveName) + } + } + sort.Strings(archiveNames) + archiveNamesSet := sets.Set[string]{}.Insert(archiveNames[:7]...) 
+ // Delete all other archives + for _, release := range remoteHTTPItems.index.Releases { + for archiveName := range release { + if !archiveNamesSet.Has(archiveName) { + delete(release, archiveName) + } + } + } + }) + It("should sort local & remote by version", func() { + env.Version = versions.AnyVersion + env.Platform.Platform = versions.Platform{OS: "*", Arch: "*"} + wf.List{}.Do(env) + + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.17.9", "linux/amd64"}, + {"(installed)", "v1.16.2", "ifonlysingularitywasstillathing/amd64"}, + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + {"(installed)", "v1.14.26", "hyperwarp/pixiedust"}, + {"(installed)", "v1.14.26", "linux/amd64"}, + {"(available)", "v1.11.1", "potato/cherrypie"}, + {"(available)", "v1.11.0", "darwin/amd64"}, + {"(available)", "v1.11.0", "linux/amd64"}, + {"(available)", "v1.10.1", "darwin/amd64"}, + {"(available)", "v1.10.1", "linux/amd64"}, + })) + }) + }) + It("should skip non-matching remote contents", func() { + env.Version.Selector = versions.PatchSelector{ + Major: 1, Minor: 16, Patch: versions.AnyPoint, + } + env.Platform.Arch = "*" + wf.List{}.Do(env) + + Expect(listFields()).To(Equal([][]string{ + {"(installed)", "v1.16.2", "linux/yourimagination"}, + {"(installed)", "v1.16.1", "linux/amd64"}, + {"(installed)", "v1.16.0", "linux/amd64"}, + {"(available)", "v1.16.4", "linux/amd64"}, + })) + }) + }) + }) + + Describe("cleanup", func() { + BeforeEach(func() { + server.Close() // ensure no network + flow := wf.Cleanup{} + env.Version = versions.AnyVersion + env.Platform.Arch = "*" + flow.Do(env) + }) + + It("should remove matching versions from the store & keep non-matching ones", func() { + entries, err := env.FS.ReadDir(".teststore/k8s") + Expect(err).NotTo(HaveOccurred(), "should be able to read the store") + Expect(entries).To(ConsistOf( + WithTransform(fs.FileInfo.Name, 
Equal("1.16.2-ifonlysingularitywasstillathing-amd64")), + WithTransform(fs.FileInfo.Name, Equal("1.14.26-hyperwarp-pixiedust")), + )) + }) + }) + + Describe("sideload", func() { + var ( + flow wf.Sideload + ) + + // hard coding to one of the archives in remoteVersionsHTTP as we can't pick the "first" of a map. + expectedPrefix := "envtest-v1.10-darwin-amd64.tar.gz" + + BeforeEach(func() { + server.Close() // ensure no network + + content := remoteVersionsHTTP.contents[expectedPrefix] + + flow.Input = bytes.NewReader(content) + flow.PrintFormat = envp.PrintPath + }) + It("should initialize the store if it doesn't exist", func() { + env.Version.Selector = ver(1, 10, 0) + Expect(env.FS.RemoveAll(testStorePath)).To(Succeed()) + flow.Do(env) + Expect(env.FS.Stat(testStorePath)).NotTo(BeNil()) + }) + It("should fail if a non-concrete version is given", func() { + defer shouldHaveError() + env.Version = versions.LatestVersion + flow.Do(env) + }) + It("should fail if a non-concrete platform is given", func() { + defer shouldHaveError() + env.Version.Selector = ver(1, 10, 0) + env.Platform.Arch = "*" + flow.Do(env) + }) + It("should load the given gizipped tarball into our store as the given version", func() { + env.Version.Selector = ver(1, 10, 0) + flow.Do(env) + baseName := env.Platform.BaseName(*env.Version.AsConcrete()) + expectedPath := filepath.Join(".teststore/k8s", baseName, "some-file") + outContents, err := env.FS.ReadFile(expectedPath) + Expect(err).NotTo(HaveOccurred(), "should be able to load the unzipped file") + Expect(string(outContents)).To(HavePrefix(expectedPrefix), "should have the debugging prefix") + }) + }) + + Describe("version", func() { + It("should print out the version if the RELEASE_TAG is empty", func() { + v := wf.Version{} + v.Do(env) + info, ok := debug.ReadBuildInfo() + Expect(ok).To(BeTrue()) + Expect(out.String()).ToNot(BeEmpty()) + Expect(out.String()).To(Equal(fmt.Sprintf("setup-envtest version: %s\n", info.Main.Version))) + }) + }) + 
+}) diff --git a/tools/setup-envtest/workflows/workflows_testutils_test.go b/tools/setup-envtest/workflows/workflows_testutils_test.go new file mode 100644 index 0000000000..6bf6db38c3 --- /dev/null +++ b/tools/setup-envtest/workflows/workflows_testutils_test.go @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 The Kubernetes Authors + +package workflows_test + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/rand" + "crypto/sha512" + "encoding/hex" + "fmt" + "net/http" + "path/filepath" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/ghttp" + "github.com/spf13/afero" + "sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" + "sigs.k8s.io/yaml" + + "sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" +) + +var ( + remoteNamesHTTP = remote.Index{ + Releases: map[string]remote.Release{ + "v1.10.0": map[string]remote.Archive{ + "envtest-v1.10-darwin-amd64.tar.gz": {}, + "envtest-v1.10-linux-amd64.tar.gz": {}, + }, + "v1.10.1": map[string]remote.Archive{ + "envtest-v1.10.1-darwin-amd64.tar.gz": {}, + "envtest-v1.10.1-linux-amd64.tar.gz": {}, + }, + "v1.11.0": map[string]remote.Archive{ + "envtest-v1.11.0-darwin-amd64.tar.gz": {}, + "envtest-v1.11.0-linux-amd64.tar.gz": {}, + }, + "v1.11.1": map[string]remote.Archive{ + "envtest-v1.11.1-potato-cherrypie.tar.gz": {}, + }, + "v1.12.3": map[string]remote.Archive{ + "envtest-v1.12.3-darwin-amd64.tar.gz": {}, + "envtest-v1.12.3-linux-amd64.tar.gz": {}, + }, + "v1.13.1": map[string]remote.Archive{ + "envtest-v1.13.1-darwin-amd64.tar.gz": {}, + "envtest-v1.13.1-linux-amd64.tar.gz": {}, + }, + "v1.14.1": map[string]remote.Archive{ + "envtest-v1.14.1-darwin-amd64.tar.gz": {}, + "envtest-v1.14.1-linux-amd64.tar.gz": {}, + }, + "v1.15.5": map[string]remote.Archive{ + "envtest-v1.15.5-darwin-amd64.tar.gz": {}, + "envtest-v1.15.5-linux-amd64.tar.gz": {}, + }, + "v1.16.4": 
map[string]remote.Archive{ + "envtest-v1.16.4-darwin-amd64.tar.gz": {}, + "envtest-v1.16.4-linux-amd64.tar.gz": {}, + }, + "v1.17.9": map[string]remote.Archive{ + "envtest-v1.17.9-darwin-amd64.tar.gz": {}, + "envtest-v1.17.9-linux-amd64.tar.gz": {}, + }, + "v1.19.0": map[string]remote.Archive{ + "envtest-v1.19.0-darwin-amd64.tar.gz": {}, + "envtest-v1.19.0-linux-amd64.tar.gz": {}, + }, + "v1.19.2": map[string]remote.Archive{ + "envtest-v1.19.2-darwin-amd64.tar.gz": {}, + "envtest-v1.19.2-linux-amd64.tar.gz": {}, + "envtest-v1.19.2-linux-arm64.tar.gz": {}, + "envtest-v1.19.2-linux-ppc64le.tar.gz": {}, + }, + "v1.20.2": map[string]remote.Archive{ + "envtest-v1.20.2-darwin-amd64.tar.gz": {}, + "envtest-v1.20.2-linux-amd64.tar.gz": {}, + "envtest-v1.20.2-linux-arm64.tar.gz": {}, + "envtest-v1.20.2-linux-ppc64le.tar.gz": {}, + }, + }, + } + remoteVersionsHTTP = makeContentsHTTP(remoteNamesHTTP) + + // keep this sorted. + localVersions = []versions.Set{ + {Version: ver(1, 17, 9), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + }}, + {Version: ver(1, 16, 2), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "yourimagination"}}, + {Platform: versions.Platform{OS: "ifonlysingularitywasstillathing", Arch: "amd64"}}, + }}, + {Version: ver(1, 16, 1), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + }}, + {Version: ver(1, 16, 0), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + }}, + {Version: ver(1, 14, 26), Platforms: []versions.PlatformItem{ + {Platform: versions.Platform{OS: "linux", Arch: "amd64"}}, + {Platform: versions.Platform{OS: "hyperwarp", Arch: "pixiedust"}}, + }}, + } +) + +type itemsHTTP struct { + index remote.Index + contents map[string][]byte +} + +func makeContentsHTTP(index remote.Index) itemsHTTP { + // This creates a new copy of the index so modifying the index + // in 
some tests doesn't affect others. + res := itemsHTTP{ + index: remote.Index{ + Releases: map[string]remote.Release{}, + }, + contents: map[string][]byte{}, + } + + for releaseVersion, releases := range index.Releases { + res.index.Releases[releaseVersion] = remote.Release{} + for archiveName := range releases { + var chunk [1024 * 48]byte // 1.5 times our chunk read size in GetVersion + copy(chunk[:], archiveName) + if _, err := rand.Read(chunk[len(archiveName):]); err != nil { + panic(err) + } + content, hash := verWithHTTP(chunk[:]) + + res.index.Releases[releaseVersion][archiveName] = remote.Archive{ + Hash: hash, + // Note: Only storing the name of the archive for now. + // This will be expanded later to a full URL once the server is running. + SelfLink: archiveName, + } + res.contents[archiveName] = content + } + } + return res +} + +func verWithHTTP(contents []byte) ([]byte, string) { + out := new(bytes.Buffer) + gzipWriter := gzip.NewWriter(out) + tarWriter := tar.NewWriter(gzipWriter) + err := tarWriter.WriteHeader(&tar.Header{ + Name: "controller-tools/envtest/some-file", + Size: int64(len(contents)), + Mode: 0777, // so we can check that we fix this later + }) + if err != nil { + panic(err) + } + _, err = tarWriter.Write(contents) + if err != nil { + panic(err) + } + tarWriter.Close() + gzipWriter.Close() + content := out.Bytes() + // controller-tools is using sha512 + hash := sha512.Sum512(content) + hashEncoded := hex.EncodeToString(hash[:]) + return content, hashEncoded +} + +func handleRemoteVersionsHTTP(server *ghttp.Server, items itemsHTTP) { + if server.HTTPTestServer == nil { + // Just return for test cases where server is closed in BeforeEach. Otherwise server.Addr() below panics. + return + } + + // The index from items contains only relative SelfLinks. + // finalIndex will contain the full links based on server.Addr(). 
+ finalIndex := remote.Index{ + Releases: map[string]remote.Release{}, + } + + for releaseVersion, releases := range items.index.Releases { + finalIndex.Releases[releaseVersion] = remote.Release{} + + for archiveName, archive := range releases { + finalIndex.Releases[releaseVersion][archiveName] = remote.Archive{ + Hash: archive.Hash, + SelfLink: fmt.Sprintf("http://%s/%s", server.Addr(), archive.SelfLink), + } + content := items.contents[archiveName] + + // Note: Using the relative path from archive here instead of the full path. + server.RouteToHandler("GET", "/"+archive.SelfLink, func(resp http.ResponseWriter, req *http.Request) { + resp.WriteHeader(http.StatusOK) + Expect(resp.Write(content)).To(Equal(len(content))) + }) + } + } + + indexYAML, err := yaml.Marshal(finalIndex) + Expect(err).ToNot(HaveOccurred()) + + server.RouteToHandler("GET", "/envtest-releases.yaml", ghttp.RespondWith( + http.StatusOK, + indexYAML, + )) +} + +func fakeStore(fs afero.Afero, dir string) { + By("making the unpacked directory") + unpackedBase := filepath.Join(dir, "k8s") + Expect(fs.Mkdir(unpackedBase, 0755)).To(Succeed()) + + By("making some fake (empty) versions") + for _, set := range localVersions { + for _, plat := range set.Platforms { + Expect(fs.Mkdir(filepath.Join(unpackedBase, plat.BaseName(set.Version)), 0755)).To(Succeed()) + } + } + + By("making some fake non-store paths") + Expect(fs.Mkdir(filepath.Join(dir, "missing-binaries"), 0755)).To(Succeed()) + + Expect(fs.Mkdir(filepath.Join(dir, "wrong-version"), 0755)).To(Succeed()) + Expect(fs.WriteFile(filepath.Join(dir, "wrong-version", "kube-apiserver"), nil, 0755)).To(Succeed()) + Expect(fs.WriteFile(filepath.Join(dir, "wrong-version", "kubectl"), nil, 0755)).To(Succeed()) + Expect(fs.WriteFile(filepath.Join(dir, "wrong-version", "etcd"), nil, 0755)).To(Succeed()) + + Expect(fs.Mkdir(filepath.Join(dir, "good-version"), 0755)).To(Succeed()) + Expect(fs.WriteFile(filepath.Join(dir, "good-version", "kube-apiserver"), nil, 
0755)).To(Succeed()) + Expect(fs.WriteFile(filepath.Join(dir, "good-version", "kubectl"), nil, 0755)).To(Succeed()) + Expect(fs.WriteFile(filepath.Join(dir, "good-version", "etcd"), nil, 0755)).To(Succeed()) + // TODO: put the right files +}