diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index e5db81621..f8796c21f 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -5,7 +5,7 @@ updates: directory: "/" labels: ["dependencies"] schedule: - interval: "daily" + interval: "monthly" groups: go-deps: patterns: @@ -37,4 +37,4 @@ updates: patterns: - "*" schedule: - interval: "daily" + interval: "monthly" diff --git a/.github/labels.yaml b/.github/labels.yaml index b67fea2c3..2f3e1d525 100644 --- a/.github/labels.yaml +++ b/.github/labels.yaml @@ -25,3 +25,18 @@ - name: backport:release/v1.2.x description: To be backported to release/v1.2.x color: '#ffd700' +- name: backport:release/v1.3.x + description: To be backported to release/v1.3.x + color: '#ffd700' +- name: backport:release/v1.4.x + description: To be backported to release/v1.4.x + color: '#ffd700' +- name: backport:release/v1.5.x + description: To be backported to release/v1.5.x + color: '#ffd700' +- name: backport:release/v1.6.x + description: To be backported to release/v1.6.x + color: '#ffd700' +- name: backport:release/v1.7.x + description: To be backported to release/v1.7.x + color: '#ffd700' diff --git a/.github/workflows/backport.yaml b/.github/workflows/backport.yaml index 4635e3e68..4081bb128 100644 --- a/.github/workflows/backport.yaml +++ b/.github/workflows/backport.yaml @@ -1,34 +1,12 @@ name: backport - on: pull_request_target: types: [closed, labeled] - -permissions: - contents: read - jobs: - pull-request: - runs-on: ubuntu-latest + backport: permissions: - contents: write - pull-requests: write - if: github.event.pull_request.state == 'closed' && github.event.pull_request.merged && (github.event_name != 'labeled' || startsWith('backport:', github.event.label.name)) - steps: - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - ref: ${{ github.event.pull_request.head.sha }} - - name: Create backport PRs - uses: 
korthout/backport-action@e8161d6a0dbfa2651b7daa76cbb75bc7c925bbf3 # v2.4.1 - # xref: https://github.com/korthout/backport-action#inputs - with: - # Use token to allow workflows to be triggered for the created PR - github_token: ${{ secrets.BOT_GITHUB_TOKEN }} - # Match labels with a pattern `backport:` - label_pattern: '^backport:([^ ]+)$' - # A bit shorter pull-request title than the default - pull_title: '[${target_branch}] ${pull_title}' - # Simpler PR description than default - pull_description: |- - Automated backport to `${target_branch}`, triggered by a label in #${pull_number}. + contents: write # for reading and creating branches. + pull-requests: write # for creating pull requests against release branches. + uses: fluxcd/gha-workflows/.github/workflows/backport.yaml@v0.4.0 + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/cifuzz.yaml b/.github/workflows/cifuzz.yaml index 1684208b6..16ddaa227 100644 --- a/.github/workflows/cifuzz.yaml +++ b/.github/workflows/cifuzz.yaml @@ -4,27 +4,16 @@ on: branches: - 'main' - 'release/**' - paths-ignore: - - 'CHANGELOG.md' - - 'README.md' - - 'MAINTAINERS' - -permissions: - contents: read - jobs: smoketest: runs-on: ubuntu-latest + permissions: + contents: read # for reading the repository code. 
steps: - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - name: Test suite setup + uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.4.0 with: - go-version: 1.21.x - cache-dependency-path: | - **/go.sum - **/go.mod + go-version: 1.25.x - name: Smoke test Fuzzers run: make fuzz-smoketest env: diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index fc2677f60..483e65ad6 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -1,92 +1,35 @@ name: e2e - on: + workflow_dispatch: pull_request: - paths-ignore: - - 'CHANGELOG.md' - - 'README.md' - - 'MAINTAINERS' push: branches: - 'main' - 'release/**' - -permissions: - contents: read # for actions/checkout to fetch code - jobs: - kind-linux-amd64: runs-on: ubuntu-latest + permissions: + contents: read # for reading the repository code. steps: - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - name: Test suite setup + uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.4.0 with: - go-version: 1.21.x - cache-dependency-path: | - **/go.sum - **/go.mod + go-version: 1.25.x + - name: Verify + run: make verify - name: Enable integration tests # Only run integration tests for main and release branches if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/') run: | echo 'GO_TAGS=integration' >> $GITHUB_ENV - - name: Setup Kubernetes - uses: helm/kind-action@99576bfa6ddf9a8e612d83b513da5a75875caced # v1.9.0 - with: - version: v0.19.0 - cluster_name: kind - node_image: kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6 - - name: Setup Kustomize - uses: fluxcd/pkg/actions/kustomize@main - - name: Setup Helm - uses: 
fluxcd/pkg/actions/helm@main - name: Run E2E tests env: SKIP_COSIGN_VERIFICATION: true CREATE_CLUSTER: false run: make e2e - - kind-linux-arm64: - # Hosted on Equinix - # Docs: https://github.com/fluxcd/flux2/tree/main/.github/runners - runs-on: [self-hosted, Linux, ARM64, equinix] - steps: - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version: 1.21.x - cache-dependency-path: | - **/go.sum - **/go.mod - - name: Enable integration tests - # Only run integration tests for main and release branches - if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/') - run: | - echo 'GO_TAGS=integration' >> $GITHUB_ENV - - name: Prepare - id: prep - run: | - echo "CLUSTER=arm64-${GITHUB_SHA:0:7}-$(date +%s)" >> $GITHUB_OUTPUT - echo "CONTEXT=kind-arm64-${GITHUB_SHA:0:7}-$(date +%s)" >> $GITHUB_OUTPUT - - name: Setup Kubernetes Kind - run: | - kind create cluster --name ${{ steps.prep.outputs.CLUSTER }} --kubeconfig=/tmp/${{ steps.prep.outputs.CLUSTER }} - - name: Run e2e tests - env: - SKIP_COSIGN_VERIFICATION: true - KIND_CLUSTER_NAME: ${{ steps.prep.outputs.CLUSTER }} - KUBECONFIG: /tmp/${{ steps.prep.outputs.CLUSTER }} - CREATE_CLUSTER: false - BUILD_PLATFORM: linux/arm64 - MINIO_TAG: RELEASE.2020-09-17T04-49-20Z-arm64 - run: make e2e - - name: Cleanup + - name: Print controller logs if: always() + continue-on-error: true run: | - kind delete cluster --name ${{ steps.prep.outputs.CLUSTER }} - rm /tmp/${{ steps.prep.outputs.CLUSTER }} + kubectl -n source-system logs -l app=source-controller diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml deleted file mode 100644 index 2c7a2c6f9..000000000 --- a/.github/workflows/nightly.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: nightly -on: - schedule: - - cron: '0 0 * * *' - workflow_dispatch: - -env: - REPOSITORY: ${{ 
github.repository }} - -permissions: - contents: read # for actions/checkout to fetch code - -jobs: - build: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Setup QEMU - uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0 - - name: Setup Docker Buildx - id: buildx - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 - with: - buildkitd-flags: "--debug" - - name: Build multi-arch container image - uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 - with: - push: false - builder: ${{ steps.buildx.outputs.name }} - context: . - file: ./Dockerfile - platforms: linux/amd64,linux/arm/v7,linux/arm64 - tags: | - ${{ env.REPOSITORY }}:nightly diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 519d8867a..e7097010c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -7,115 +7,21 @@ on: inputs: tag: description: 'image tag prefix' - default: 'preview' + default: 'rc' required: true - -permissions: - contents: read - -env: - CONTROLLER: ${{ github.event.repository.name }} - jobs: release: - outputs: - hashes: ${{ steps.slsa.outputs.hashes }} - image_url: ${{ steps.slsa.outputs.image_url }} - image_digest: ${{ steps.slsa.outputs.image_digest }} - runs-on: ubuntu-latest permissions: contents: write # for creating the GitHub release. id-token: write # for creating OIDC tokens for signing. packages: write # for pushing and signing container images. 
- steps: - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Setup Kustomize - uses: fluxcd/pkg/actions/kustomize@main - - name: Prepare - id: prep - run: | - VERSION="${{ github.event.inputs.tag }}-${GITHUB_SHA::8}" - if [[ $GITHUB_REF == refs/tags/* ]]; then - VERSION=${GITHUB_REF/refs\/tags\//} - fi - echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT - echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT - - name: Setup QEMU - uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0 - - name: Setup Docker Buildx - id: buildx - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 - - name: Login to GitHub Container Registry - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 - with: - registry: ghcr.io - username: fluxcdbot - password: ${{ secrets.GHCR_TOKEN }} - - name: Login to Docker Hub - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 - with: - username: fluxcdbot - password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }} - - name: Generate images meta - id: meta - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 - with: - images: | - fluxcd/${{ env.CONTROLLER }} - ghcr.io/fluxcd/${{ env.CONTROLLER }} - tags: | - type=raw,value=${{ steps.prep.outputs.VERSION }} - - name: Publish images - id: build-push - uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 - with: - sbom: true - provenance: true - push: true - builder: ${{ steps.buildx.outputs.name }} - context: . 
- file: ./Dockerfile - platforms: linux/amd64,linux/arm/v7,linux/arm64 - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - - uses: sigstore/cosign-installer@e1523de7571e31dbe865fd2e80c5c7c23ae71eb4 # v3.4.0 - - name: Sign images - env: - COSIGN_EXPERIMENTAL: 1 - run: | - cosign sign --yes fluxcd/${{ env.CONTROLLER }}@${{ steps.build-push.outputs.digest }} - cosign sign --yes ghcr.io/fluxcd/${{ env.CONTROLLER }}@${{ steps.build-push.outputs.digest }} - - name: Generate release artifacts - if: startsWith(github.ref, 'refs/tags/v') - run: | - mkdir -p config/release - kustomize build ./config/crd > ./config/release/${{ env.CONTROLLER }}.crds.yaml - kustomize build ./config/manager > ./config/release/${{ env.CONTROLLER }}.deployment.yaml - - uses: anchore/sbom-action/download-syft@b6a39da80722a2cb0ef5d197531764a89b5d48c3 # v0.15.8 - - name: Create release and SBOM - id: run-goreleaser - if: startsWith(github.ref, 'refs/tags/v') - uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v5.0.0 - with: - version: latest - args: release --clean --skip-validate - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Generate SLSA metadata - id: slsa - env: - ARTIFACTS: "${{ steps.run-goreleaser.outputs.artifacts }}" - run: | - hashes=$(echo -E $ARTIFACTS | jq --raw-output '.[] | {name, "digest": (.extra.Digest // .extra.Checksum)} | select(.digest) | {digest} + {name} | join(" ") | sub("^sha256:";"")' | base64 -w0) - echo "hashes=$hashes" >> $GITHUB_OUTPUT - - image_url=fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.version }} - echo "image_url=$image_url" >> $GITHUB_OUTPUT - - image_digest=${{ steps.build-push.outputs.digest }} - echo "image_digest=$image_digest" >> $GITHUB_OUTPUT - + uses: fluxcd/gha-workflows/.github/workflows/controller-release.yaml@v0.4.0 + with: + controller: ${{ github.event.repository.name }} + release-candidate-prefix: ${{ github.event.inputs.tag }} + secrets: + github-token: ${{ 
secrets.GITHUB_TOKEN }} + dockerhub-token: ${{ secrets.DOCKER_FLUXCD_PASSWORD }} release-provenance: needs: [release] permissions: @@ -123,35 +29,35 @@ jobs: id-token: write # for creating OIDC tokens for signing. contents: write # for uploading attestations to GitHub releases. if: startsWith(github.ref, 'refs/tags/v') - uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.9.0 + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0 with: provenance-name: "provenance.intoto.jsonl" base64-subjects: "${{ needs.release.outputs.hashes }}" upload-assets: true - dockerhub-provenance: needs: [release] permissions: + contents: read # for reading the repository code. actions: read # for detecting the Github Actions environment. id-token: write # for creating OIDC tokens for signing. packages: write # for uploading attestations. if: startsWith(github.ref, 'refs/tags/v') - uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.9.0 + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0 with: image: ${{ needs.release.outputs.image_url }} digest: ${{ needs.release.outputs.image_digest }} registry-username: fluxcdbot secrets: registry-password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }} - ghcr-provenance: needs: [release] permissions: + contents: read # for reading the repository code. actions: read # for detecting the Github Actions environment. id-token: write # for creating OIDC tokens for signing. packages: write # for uploading attestations. 
if: startsWith(github.ref, 'refs/tags/v') - uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v1.9.0 + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0 with: image: ghcr.io/${{ needs.release.outputs.image_url }} digest: ${{ needs.release.outputs.image_digest }} diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index 9ffcef166..ea8e992de 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -1,52 +1,17 @@ name: scan - on: push: - branches: [ 'main', 'release/**' ] + branches: [ main ] pull_request: - branches: [ 'main', 'release/**' ] + branches: [ main ] schedule: - cron: '18 10 * * 3' - -permissions: - contents: read # for actions/checkout to fetch code - security-events: write # for codeQL to write security events - jobs: - fossa: - name: FOSSA - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Run FOSSA scan and upload build data - uses: fossa-contrib/fossa-action@cdc5065bcdee31a32e47d4585df72d66e8e941c2 # v3.0.0 - with: - # FOSSA Push-Only API Token - fossa-api-key: 5ee8bf422db1471e0bcf2bcb289185de - github-token: ${{ github.token }} - - codeql: - name: CodeQL - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version: 1.21.x - cache-dependency-path: | - **/go.sum - **/go.mod - - name: Initialize CodeQL - uses: github/codeql-action/init@379614612a29c9e28f31f39a59013eb8012a51f0 # v3.24.3 - with: - languages: go - # xref: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs - # xref: 
https://codeql.github.com/codeql-query-help/go/ - queries: security-and-quality - - name: Autobuild - uses: github/codeql-action/autobuild@379614612a29c9e28f31f39a59013eb8012a51f0 # v3.24.3 - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@379614612a29c9e28f31f39a59013eb8012a51f0 # v3.24.3 + analyze: + permissions: + contents: read # for reading the repository code. + security-events: write # for uploading the CodeQL analysis results. + uses: fluxcd/gha-workflows/.github/workflows/code-scan.yaml@v0.4.0 + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} + fossa-token: ${{ secrets.FOSSA_TOKEN }} diff --git a/.github/workflows/sync-labels.yaml b/.github/workflows/sync-labels.yaml index 6e41b8c71..a4635094d 100644 --- a/.github/workflows/sync-labels.yaml +++ b/.github/workflows/sync-labels.yaml @@ -6,23 +6,11 @@ on: - main paths: - .github/labels.yaml - -permissions: - contents: read - jobs: - labels: - name: Run sync - runs-on: ubuntu-latest + sync-labels: permissions: - issues: write - steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: EndBug/label-sync@52074158190acb45f3077f9099fea818aa43f97a # v2.3.3 - with: - # Configuration file - config-file: | - https://raw.githubusercontent.com/fluxcd/community/main/.github/standard-labels.yaml - .github/labels.yaml - # Strictly declarative - delete-other-labels: true + contents: read # for reading the labels file. + issues: write # for creating and updating labels. 
+ uses: fluxcd/gha-workflows/.github/workflows/labels-sync.yaml@v0.4.0 + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 000000000..c7a9aa2e8 --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,22 @@ +name: test +on: + workflow_dispatch: + pull_request: + push: + branches: + - 'main' + - 'release/**' +jobs: + test-linux-amd64: + runs-on: ubuntu-latest + steps: + - name: Test suite setup + uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.4.0 + with: + go-version: 1.25.x + - name: Run tests + env: + SKIP_COSIGN_VERIFICATION: true + TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }} + TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }} + run: make test diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml deleted file mode 100644 index afbe4ec9e..000000000 --- a/.github/workflows/tests.yaml +++ /dev/null @@ -1,67 +0,0 @@ -name: tests - -on: - pull_request: - paths-ignore: - - 'CHANGELOG.md' - - 'README.md' - - 'MAINTAINERS' - - push: - branches: - - 'main' - - 'release/**' - -permissions: - contents: read # for actions/checkout to fetch code - -jobs: - - test-linux-amd64: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version: 1.21.x - cache-dependency-path: | - **/go.sum - **/go.mod - - name: Run tests - env: - SKIP_COSIGN_VERIFICATION: true - TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }} - TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }} - run: make test - - test-linux-arm64: - # Hosted on Equinix - # Docs: https://github.com/fluxcd/flux2/tree/main/.github/runners - runs-on: [self-hosted, Linux, ARM64, equinix] - steps: - - name: Checkout - uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version: 1.21.x - cache-dependency-path: | - **/go.sum - **/go.mod - - name: Run tests - env: - SKIP_COSIGN_VERIFICATION: true - - TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }} - TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }} - - # Temporarily disabling -race for arm64 as our GitHub action - # runners don't seem to like it. The race detection was tested - # on both Apple M1 and Linux arm64 with successful results. - # - # We should reenable go test -race for arm64 runners once the - # current issue is resolved. - GO_TEST_ARGS: '' - run: make test diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml deleted file mode 100644 index 401fef839..000000000 --- a/.github/workflows/verify.yaml +++ /dev/null @@ -1,33 +0,0 @@ -name: verify - -on: - pull_request: - paths-ignore: - - 'CHANGELOG.md' - - 'README.md' - - 'MAINTAINERS' - - push: - branches: - - 'main' - - 'release/**' - -permissions: - contents: read # for actions/checkout to fetch code - -jobs: - - verify-linux-amd64: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Setup Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version: 1.21.x - cache-dependency-path: | - **/go.sum - **/go.mod - - name: Verify - run: make verify diff --git a/.goreleaser.yaml b/.goreleaser.yaml index a2acb980f..7b61ce0c1 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -23,7 +23,7 @@ release: To verify the images and their provenance (SLSA level 3), please see the [security documentation](https://fluxcd.io/flux/security/). 
changelog: - skip: true + disable: true checksum: extra_files: diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a2bb0b08..74cb010a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,403 @@ All notable changes to this project are documented in this file. +## 1.7.0 + +**Release date:** 2025-09-15 + +This minor release comes with new features, improvements and bug fixes. + +### ExternalArtifact + +A new [ExternalArtifact](https://github.com/fluxcd/source-controller/blob/main/docs/spec/v1/externalartifacts.md) API has been added to the `source.toolkit.fluxcd.io` group. This API enables advanced source composition and decomposition patterns implemented by the [source-watcher](https://github.com/fluxcd/source-watcher) controller. + +### GitRepository + +GitRepository controller now includes fixes for stalling issues and improved error handling. Multi-tenant workload identity support has been added for Azure repositories when the `ObjectLevelWorkloadIdentity` feature gate is enabled. TLS configuration support has been added for GitHub App authentication. + +### Bucket + +Bucket controller now supports multi-tenant workload identity for AWS, Azure and GCP providers when the `ObjectLevelWorkloadIdentity` feature gate is enabled. A default service account flag has been added for lockdown scenarios. + +### General updates + +The controller now supports system certificate pools for improved CA compatibility, and TLS ServerName pinning has been removed from TLS configuration for better flexibility. A `--default-service-account=` flag was introduced for workload identity multi-tenancy lockdown. + +In addition, the Kubernetes dependencies have been updated to v1.34, Helm +has been updated to v3.19 and various other controller dependencies have +been updated to their latest version. The controller is now built with +Go 1.25. 
+ +Fixes: +- Fix GitRepository controller stalling when it shouldn't + [#1865](https://github.com/fluxcd/source-controller/pull/1865) + +Improvements: +- [RFC-0010] Add multi-tenant workload identity support for GCP Bucket + [#1862](https://github.com/fluxcd/source-controller/pull/1862) +- [RFC-0010] Add multi-tenant workload identity support for AWS Bucket + [#1868](https://github.com/fluxcd/source-controller/pull/1868) +- [RFC-0010] Add multi-tenant workload identity support for Azure GitRepository + [#1871](https://github.com/fluxcd/source-controller/pull/1871) +- [RFC-0010] Add default-service-account for lockdown + [#1872](https://github.com/fluxcd/source-controller/pull/1872) +- [RFC-0010] Add multi-tenant workload identity support for Azure Blob Storage + [#1875](https://github.com/fluxcd/source-controller/pull/1875) +- [RFC-0012] Add ExternalArtifact API documentation + [#1881](https://github.com/fluxcd/source-controller/pull/1881) +- [RFC-0012] Refactor controller to use `fluxcd/pkg/artifact` + [#1883](https://github.com/fluxcd/source-controller/pull/1883) +- Migrate OCIRepository controller to runtime/secrets + [#1851](https://github.com/fluxcd/source-controller/pull/1851) +- Migrate Bucket controller to runtime/secrets + [#1852](https://github.com/fluxcd/source-controller/pull/1852) +- Add TLS config for GitHub App authentication + [#1860](https://github.com/fluxcd/source-controller/pull/1860) +- Remove ServerName pinning from TLS config + [#1870](https://github.com/fluxcd/source-controller/pull/1870) +- Extract storage operations to a dedicated package + [#1864](https://github.com/fluxcd/source-controller/pull/1864) +- Remove deprecated APIs in group `source.toolkit.fluxcd.io/v1beta1` + [#1861](https://github.com/fluxcd/source-controller/pull/1861) +- Migrate 
tests from gotest to gomega + [#1876](https://github.com/fluxcd/source-controller/pull/1876) +- Update dependencies + [#1888](https://github.com/fluxcd/source-controller/pull/1888) + [#1880](https://github.com/fluxcd/source-controller/pull/1880) + [#1878](https://github.com/fluxcd/source-controller/pull/1878) + [#1876](https://github.com/fluxcd/source-controller/pull/1876) + [#1874](https://github.com/fluxcd/source-controller/pull/1874) + [#1850](https://github.com/fluxcd/source-controller/pull/1850) + [#1844](https://github.com/fluxcd/source-controller/pull/1844) + +## 1.6.2 + +**Release date:** 2025-06-27 + +This patch release comes with a fix for `rsa-sha2-512` and `rsa-sha2-256` algorithms +not being prioritized for `ssh-rsa` host keys. + +Fixes: +- Fix: Prioritize sha2-512 and sha2-256 for ssh-rsa host keys + [#1839](https://github.com/fluxcd/source-controller/pull/1839) + +## 1.6.1 + +**Release date:** 2025-06-13 + +This patch release comes with a fix for the `knownhosts: key mismatch` +error in the `GitRepository` API when using SSH authentication, and +a fix for authentication with +[public ECR repositories](https://fluxcd.io/flux/integrations/aws/#for-amazon-public-elastic-container-registry) +in the `OCIRepository` API. + +Fix: +- Fix authentication for public ECR + [#1825](https://github.com/fluxcd/source-controller/pull/1825) +- Fix `knownhosts key mismatch` regression bug + [#1829](https://github.com/fluxcd/source-controller/pull/1829) + +## 1.6.0 + +**Release date:** 2025-05-27 + +This minor release promotes the OCIRepository API to GA, and comes with new features, +improvements and bug fixes. + +### OCIRepository + +The `OCIRepository` API has been promoted from `v1beta2` to `v1` (GA). +The `v1` API is backwards compatible with `v1beta2`. 
+ +OCIRepository API now supports object-level workload identity by setting +`.spec.provider` to one of `aws`, `azure`, or `gcp`, and setting +`.spec.serviceAccountName` to the name of a service account in the same +namespace that has been configured with appropriate cloud permissions. +For this feature to work, the controller feature gate +`ObjectLevelWorkloadIdentity` must be enabled. See a complete guide +[here](https://fluxcd.io/flux/integrations/). + +OCIRepository API now caches registry credentials for cloud providers +by default. This behavior can be disabled or fine-tuned by adjusting the +token cache controller flags (see [docs](https://fluxcd.io/flux/components/source/options/)). +The token cache also exposes metrics that are documented +[here](https://fluxcd.io/flux/monitoring/metrics/#controller-metrics). + +### GitRepository + +GitRepository API now supports sparse checkout by setting a list +of directories in the `.spec.sparseCheckout` field. This allows +for optimizing the amount of data fetched from the Git repository. + +GitRepository API now supports mTLS authentication for HTTPS Git repositories +by setting the fields `tls.crt`, `tls.key`, and `ca.crt` in the `.data` field +of the referenced Secret in `.spec.secretRef`. + +GitRepository API now caches credentials for non-`generic` providers by default. +This behavior can be disabled or fine-tuned by adjusting the +token cache controller flags (see [docs](https://fluxcd.io/flux/components/source/options/)). +The token cache also exposes metrics that are documented +[here](https://fluxcd.io/flux/monitoring/metrics/#controller-metrics). + +### General updates + +In addition, the Kubernetes dependencies have been updated to v1.33 and +various other controller dependencies have been updated to their latest +version. The controller is now built with Go 1.24. 
+ +Fixes: +- Downgrade `Masterminds/semver` to v3.3.0 + [#1785](https://github.com/fluxcd/source-controller/pull/1785) + +Improvements: +- Promote OCIRepository API to v1 (GA) + [#1794](https://github.com/fluxcd/source-controller/pull/1794) +- [RFC-0010] Introduce object-level workload identity for container registry APIs and cache credentials + [#1790](https://github.com/fluxcd/source-controller/pull/1790) + [#1802](https://github.com/fluxcd/source-controller/pull/1802) + [#1811](https://github.com/fluxcd/source-controller/pull/1811) +- Implement Sparse Checkout for `GitRepository` + [#1774](https://github.com/fluxcd/source-controller/pull/1774) +- Add Mutual TLS support to `GitRepository` + [#1778](https://github.com/fluxcd/source-controller/pull/1778) +- Introduce token cache for `GitRepository` + [#1745](https://github.com/fluxcd/source-controller/pull/1745) + [#1788](https://github.com/fluxcd/source-controller/pull/1788) + [#1789](https://github.com/fluxcd/source-controller/pull/1789) +- Build controller without CGO + [#1725](https://github.com/fluxcd/source-controller/pull/1725) +- Various dependency updates + [#1812](https://github.com/fluxcd/source-controller/pull/1812) + [#1800](https://github.com/fluxcd/source-controller/pull/1800) + [#1810](https://github.com/fluxcd/source-controller/pull/1810) + [#1806](https://github.com/fluxcd/source-controller/pull/1806) + [#1782](https://github.com/fluxcd/source-controller/pull/1782) + [#1783](https://github.com/fluxcd/source-controller/pull/1783) + [#1775](https://github.com/fluxcd/source-controller/pull/1775) + [#1728](https://github.com/fluxcd/source-controller/pull/1728) + [#1722](https://github.com/fluxcd/source-controller/pull/1722) + 
+## 1.5.0 + +**Release date:** 2025-02-13 + +This minor release comes with various bug fixes and improvements. + +### GitRepository + +The GitRepository API now supports authenticating through GitHub App +for GitHub repositories. See +[docs](https://fluxcd.io/flux/components/source/gitrepositories/#github). + +In addition, the Kubernetes dependencies have been updated to v1.32.1, Helm has +been updated to v3.17.0 and various other controller dependencies have been +updated to their latest version. + +Fixes: +- Remove deprecated object metrics from controllers + [#1686](https://github.com/fluxcd/source-controller/pull/1686) + +Improvements: +- [RFC-007] Implement GitHub app authentication for git repositories. + [#1647](https://github.com/fluxcd/source-controller/pull/1647) +- Various dependency updates + [#1684](https://github.com/fluxcd/source-controller/pull/1684) + [#1689](https://github.com/fluxcd/source-controller/pull/1689) + [#1693](https://github.com/fluxcd/source-controller/pull/1693) + [#1705](https://github.com/fluxcd/source-controller/pull/1705) + [#1708](https://github.com/fluxcd/source-controller/pull/1708) + [#1709](https://github.com/fluxcd/source-controller/pull/1709) + [#1713](https://github.com/fluxcd/source-controller/pull/1713) + [#1716](https://github.com/fluxcd/source-controller/pull/1716) + +## 1.4.1 + +**Release date:** 2024-09-26 + +This patch release comes with a fix to the `GitRepository` API to keep it +backwards compatible by removing the default value for `.spec.provider` field +when not set in the API. The controller will internally consider an empty value +for the provider as the `generic` provider. 
+ +Fix: +- GitRepo: Remove provider default value from API + [#1626](https://github.com/fluxcd/source-controller/pull/1626) + +## 1.4.0 + +**Release date:** 2024-09-25 + +This minor release promotes the Bucket API to GA, and comes with new features, +improvements and bug fixes. + +### Bucket + +The `Bucket` API has been promoted from `v1beta2` to `v1` (GA). +The `v1` API is backwards compatible with `v1beta2`. + +Bucket API now supports proxy through the field `.spec.proxySecretRef` and custom TLS client certificate and CA through the field `.spec.certSecretRef`. + +Bucket API now also supports specifying a custom STS configuration through the field `.spec.sts`. This is currently only supported for the providers `generic` and `aws`. When specifying a custom STS configuration one must specify which STS provider to use. For the `generic` bucket provider we support the `ldap` STS provider, and for the `aws` bucket provider we support the `aws` STS provider. For the `aws` STS provider, one may use the default main STS endpoint, or the regional STS endpoints, or even an interface endpoint. + +### OCIRepository + +OCIRepository API now supports proxy through the field `.spec.proxySecretRef`. + +**Warning**: Proxy is not supported for cosign keyless verification. + +### GitRepository + +GitRepository API now supports OIDC authentication for Azure DevOps repositories through the field `.spec.provider` using the value `azure`. See the docs for details [here](https://fluxcd.io/flux/components/source/gitrepositories/#provider). + +In addition, the Kubernetes dependencies have been updated to v1.31.1, Helm has +been updated to v3.16.1 and various other controller dependencies have been +updated to their latest version. The controller is now built with Go 1.23. 
+ +Fixes: +- helm: Use the default transport pool to preserve proxy settings + [#1490](https://github.com/fluxcd/source-controller/pull/1490) +- Fix incorrect use of format strings with the conditions package. + [#1529](https://github.com/fluxcd/source-controller/pull/1529) +- Fix HelmChart local dependency resolution for name-based path + [#1539](https://github.com/fluxcd/source-controller/pull/1539) +- Fix Helm index validation for Artifactory + [#1516](https://github.com/fluxcd/source-controller/pull/1516) + +Improvements: +- Promote Bucket API to v1 + [#1592](https://github.com/fluxcd/source-controller/pull/1592) +- Add .spec.certSecretRef to Bucket API + [#1475](https://github.com/fluxcd/source-controller/pull/1475) +- Run ARM64 tests on GitHub runners + [#1512](https://github.com/fluxcd/source-controller/pull/1512) +- Add support for .spec.proxySecretRef for generic provider of Bucket API + [#1500](https://github.com/fluxcd/source-controller/pull/1500) +- Improve invalid proxy error message for Bucket API + [#1550](https://github.com/fluxcd/source-controller/pull/1550) +- Add support for AWS STS endpoint in the Bucket API + [#1552](https://github.com/fluxcd/source-controller/pull/1552) +- Add proxy support for GCS buckets + [#1565](https://github.com/fluxcd/source-controller/pull/1565) +- azure-blob: Fix VisitObjects() in integration test + [#1574](https://github.com/fluxcd/source-controller/pull/1574) +- Add proxy support for Azure buckets + [#1567](https://github.com/fluxcd/source-controller/pull/1567) +- Add proxy support for AWS S3 buckets + [#1568](https://github.com/fluxcd/source-controller/pull/1568) +- Add proxy support for OCIRepository API + [#1536](https://github.com/fluxcd/source-controller/pull/1536) +- Add LDAP provider for Bucket STS API 
+ [#1585](https://github.com/fluxcd/source-controller/pull/1585) +- Introduce Bucket provider constants with the common part as a prefix + [#1589](https://github.com/fluxcd/source-controller/pull/1589) +- OCIRepository: Configure proxy for OIDC auth + [#1607](https://github.com/fluxcd/source-controller/pull/1607) +- [RFC-0007] Enable Azure OIDC for Azure DevOps repositories + [#1591](https://github.com/fluxcd/source-controller/pull/1591) +- Build with Go 1.23 + [#1582](https://github.com/fluxcd/source-controller/pull/1582) +- Various dependency updates + [#1507](https://github.com/fluxcd/source-controller/pull/1507) + [#1576](https://github.com/fluxcd/source-controller/pull/1576) + [#1578](https://github.com/fluxcd/source-controller/pull/1578) + [#1579](https://github.com/fluxcd/source-controller/pull/1579) + [#1583](https://github.com/fluxcd/source-controller/pull/1583) + [#1588](https://github.com/fluxcd/source-controller/pull/1588) + [#1603](https://github.com/fluxcd/source-controller/pull/1603) + [#1610](https://github.com/fluxcd/source-controller/pull/1610) + [#1614](https://github.com/fluxcd/source-controller/pull/1614) + [#1618](https://github.com/fluxcd/source-controller/pull/1618) + +## 1.3.0 + +**Release date:** 2024-05-03 + +This minor release promotes the Helm APIs to GA, and comes with new features, +improvements and bug fixes. + +### HelmRepository + +The `HelmRepository` API has been promoted from `v1beta2` to `v1` (GA). +The `v1` API is backwards compatible with `v1beta2`. + +For `HelmRepository` of type `oci`, the `.spec.insecure` field allows connecting +over HTTP to an insecure non-TLS container registry. 
+
+To upgrade from `v1beta2`, after deploying the new CRD and controller,
+set `apiVersion: source.toolkit.fluxcd.io/v1` in the YAML files that
+contain `HelmRepository` definitions.
+Bumping the API version in manifests can be done gradually.
+It is advised not to delay this procedure as the beta versions will be removed after 6 months.
+
+### HelmChart
+
+The `HelmChart` API has been promoted from `v1beta2` to `v1` (GA).
+The `v1` API is backwards compatible with `v1beta2`, with the exception
+of the removal of the deprecated field `.spec.valuesFile` which was replaced with `spec.valuesFiles`.
+
+The `HelmChart` API was extended with support for
+[Notation signature verification](https://github.com/fluxcd/source-controller/blob/release/v1.3.x/docs/spec/v1/helmcharts.md#notation)
+of Helm OCI charts.
+
+A new optional field `.spec.ignoreMissingValuesFiles` has been added,
+which allows the controller to ignore missing values files rather than failing to reconcile the `HelmChart`.
+
+### OCIRepository
+
+The `OCIRepository` API was extended with support for
+[Notation signature verification](https://github.com/fluxcd/source-controller/blob/release/v1.3.x/docs/spec/v1beta2/ocirepositories.md#notation)
+of OCI artifacts.
+
+A new optional field `.spec.ref.semverFilter` has been added,
+which allows the controller to filter the tags based on regular expressions
+before applying the semver range. This allows
+[picking the latest release candidate](https://github.com/fluxcd/source-controller/blob/release/v1.3.x/docs/spec/v1beta2/ocirepositories.md#semverfilter-example)
+instead of the latest stable release.
+
+In addition, the controller has been updated to Kubernetes v1.30.0,
+Helm v3.14.4, and various other dependencies to their latest version
+to patch upstream CVEs.
+
+Improvements:
+- Promote Helm APIs to `source.toolkit.fluxcd.io/v1` (GA)
+  [#1428](https://github.com/fluxcd/source-controller/pull/1428)
+- Add `.spec.ignoreMissingValuesFiles` to HelmChart API
+  [#1447](https://github.com/fluxcd/source-controller/pull/1447)
+- Implement `.spec.ref.semverFilter` in OCIRepository API
+  [#1407](https://github.com/fluxcd/source-controller/pull/1407)
+- Helm: Allow insecure registry login
+  [#1412](https://github.com/fluxcd/source-controller/pull/1412)
+- Add support for Notation verification to HelmChart and OCIRepository
+  [#1075](https://github.com/fluxcd/source-controller/pull/1075)
+- Various dependency updates
+  [#1442](https://github.com/fluxcd/source-controller/pull/1442)
+  [#1450](https://github.com/fluxcd/source-controller/pull/1450)
+  [#1469](https://github.com/fluxcd/source-controller/pull/1469)
+  [#1378](https://github.com/fluxcd/source-controller/pull/1378)
+
+Fixes:
+- Bind cached helm index to the maximum index size
+  [#1457](https://github.com/fluxcd/source-controller/pull/1457)
+- Remove `genclient:Namespaced` tag
+  [#1386](https://github.com/fluxcd/source-controller/pull/1386)
+
+## 1.2.5
+
+**Release date:** 2024-04-04
+
+This patch release comes with improvements to the `HelmChart` name validation
+and adds logging sanitization of connection error messages for `Bucket` sources.
+ +Fixes: +- Improve chart name validation + [#1377](https://github.com/fluxcd/source-controller/pull/1377) +- Sanitize URLs for bucket fetch error messages + [#1430](https://github.com/fluxcd/source-controller/pull/1430) + +Improvements: +- Update controller-gen to v0.14.0 + [#1399](https://github.com/fluxcd/source-controller/pull/1399) + ## 1.2.4 **Release date:** 2024-02-01 diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 072e7232b..11d05ad83 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -15,7 +15,7 @@ There are a number of dependencies required to be able to run the controller and In addition to the above, the following dependencies are also used by some of the `make` targets: -- `controller-gen` (v0.12.0) +- `controller-gen` (v0.19.0) - `gen-crd-api-reference-docs` (v0.3.0) - `setup-envtest` (latest) @@ -24,7 +24,7 @@ If any of the above dependencies are not present on your system, the first invoc ## How to run the test suite Prerequisites: -* Go >= 1.21 +* Go >= 1.25 You can run the test suite by simply doing @@ -58,7 +58,7 @@ make run ### Building the container image -Set the name of the container image to be created from the source code. This will be used +Set the name of the container image to be created from the source code. 
This will be used when building, pushing and referring to the image on YAML files: ```sh @@ -79,7 +79,7 @@ make docker-push ``` Alternatively, the three steps above can be done in a single line: - + ```sh IMG=registry-path/source-controller TAG=latest BUILD_ARGS=--push \ make docker-build @@ -128,7 +128,8 @@ Create a `.vscode/launch.json` file: "type": "go", "request": "launch", "mode": "auto", - "program": "${workspaceFolder}/main.go" + "program": "${workspaceFolder}/main.go", + "args": ["--storage-adv-addr=:0", "--storage-path=${workspaceFolder}/bin/data"] } ] } diff --git a/Dockerfile b/Dockerfile index 2396e5af2..0f7c6f849 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,30 +1,16 @@ -ARG BASE_VARIANT=alpine -ARG GO_VERSION=1.21 -ARG XX_VERSION=1.3.0 +ARG GO_VERSION=1.25 +ARG XX_VERSION=1.6.1 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx -FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-${BASE_VARIANT} as gostable - -FROM gostable AS go-linux - -# Build-base consists of build platform dependencies and xx. -# These will be used at current arch to yield execute the cross compilations. -FROM go-${TARGETOS} AS build-base - -RUN apk add --no-cache clang lld +# Docker buildkit multi-arch build requires golang alpine +FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS builder +# Copy the build utilities. COPY --from=xx / / -# build-go-mod can still be cached at build platform architecture. -FROM build-base as build - ARG TARGETPLATFORM -# Some dependencies have to installed -# for the target platform: https://github.com/tonistiigi/xx#go--cgo -RUN xx-apk add musl-dev gcc clang lld - # Configure workspace WORKDIR /workspace @@ -40,34 +26,22 @@ RUN go mod download # Copy source code COPY main.go main.go -COPY pkg/ pkg/ COPY internal/ internal/ ARG TARGETPLATFORM ARG TARGETARCH -# Reasons why CGO is in use: -# - The SHA1 implementation (sha1cd) used by go-git depends on CGO for -# performance reasons. 
 See: https://github.com/pjbgf/sha1cd/issues/15
-ENV CGO_ENABLED=1
-
-RUN export CGO_LDFLAGS="-static -fuse-ld=lld" && \
-    xx-go build \
-    -ldflags "-s -w" \
-    -tags 'netgo,osusergo,static_build' \
-    -o /source-controller -trimpath main.go;
-
-# Ensure that the binary was cross-compiled correctly to the target platform.
-RUN xx-verify --static /source-controller
+# build without specifying the arch
+ENV CGO_ENABLED=0
+RUN xx-go build -trimpath -a -o source-controller main.go

-FROM alpine:3.19
+FROM alpine:3.22

 ARG TARGETPLATFORM
 RUN apk --no-cache add ca-certificates \
   && update-ca-certificates

-# Copy over binary from build
-COPY --from=build /source-controller /usr/local/bin/
+COPY --from=builder /workspace/source-controller /usr/local/bin/

 USER 65534:65534

 ENTRYPOINT [ "source-controller" ]
diff --git a/MAINTAINERS b/MAINTAINERS
index a4b4f6ae5..3a1bb4156 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6,3 +6,5 @@
 In additional to those listed below, this project shares maintainers
 from the main Flux v2 git repository, as listed in
 https://github.com/fluxcd/flux2/blob/main/MAINTAINERS
+
+Dipti Pai, Microsoft (github: @dipti-pai, slack: Dipti Pai)
diff --git a/Makefile b/Makefile
index 749d22bd7..28226af5d 100644
--- a/Makefile
+++ b/Makefile
@@ -38,7 +38,7 @@ FUZZ_TIME ?= 1m
 GO_STATIC_FLAGS=-ldflags "-s -w" -tags 'netgo,osusergo,static_build$(addprefix ,,$(GO_TAGS))'

 # API (doc) generation utilities
-CONTROLLER_GEN_VERSION ?= v0.12.0
+CONTROLLER_GEN_VERSION ?= v0.19.0
 GEN_API_REF_DOCS_VERSION ?= e327d0730470cbd61b06300f81c5fcf91c23c113

 # If gobin not set, create one on ./build and add to path.
@@ -61,9 +61,10 @@ ifeq ($(shell uname -s),Darwin) ENVTEST_ARCH=amd64 endif -all: build +all: manager -build: ## Build manager binary +# Build manager binary +manager: generate fmt vet go build $(GO_STATIC_FLAGS) -o $(BUILD_DIR)/bin/manager main.go KUBEBUILDER_ASSETS?="$(shell $(ENVTEST) --arch=$(ENVTEST_ARCH) use -i $(ENVTEST_KUBERNETES_VERSION) --bin-dir=$(ENVTEST_ASSETS_DIR) -p path)" @@ -114,12 +115,11 @@ manifests: controller-gen ## Generate manifests, e.g. CRD, RBAC, etc. cd api; $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role paths="./..." output:crd:artifacts:config="../config/crd/bases" api-docs: gen-crd-api-reference-docs ## Generate API reference documentation - $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1beta2 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/v1beta2/source.md $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/v1/source.md tidy: ## Run go mod tidy - cd api; rm -f go.sum; go mod tidy -compat=1.20 - rm -f go.sum; go mod tidy -compat=1.21 + cd api; rm -f go.sum; go mod tidy -compat=1.25 + rm -f go.sum; go mod tidy -compat=1.25 fmt: ## Run go fmt against code go fmt ./... 
@@ -188,7 +188,7 @@ TMP_DIR=$$(mktemp -d) ;\ cd $$TMP_DIR ;\ go mod init tmp ;\ echo "Downloading $(2)" ;\ -env -i bash -c "GOBIN=$(GOBIN) PATH=$(PATH) GOPATH=$(shell go env GOPATH) GOCACHE=$(shell go env GOCACHE) go install $(2)" ;\ +env -i bash -c "GOBIN=$(GOBIN) PATH=\"$(PATH)\" GOPATH=$(shell go env GOPATH) GOCACHE=$(shell go env GOCACHE) go install $(2)" ;\ rm -rf $$TMP_DIR ;\ } endef diff --git a/PROJECT b/PROJECT index 7e43c7b24..9d89d81be 100644 --- a/PROJECT +++ b/PROJECT @@ -7,9 +7,15 @@ resources: - group: source kind: GitRepository version: v1beta2 +- group: source + kind: HelmRepository + version: v1 - group: source kind: HelmRepository version: v1beta2 +- group: source + kind: HelmChart + version: v1 - group: source kind: HelmChart version: v1beta2 @@ -31,4 +37,13 @@ resources: - group: source kind: OCIRepository version: v1beta2 +- group: source + kind: Bucket + version: v1 +- group: source + kind: OCIRepository + version: v1 +- group: source + kind: ExternalArtifact + version: v1 version: "2" diff --git a/README.md b/README.md index ab4d4f1ef..6f07b2e00 100644 --- a/README.md +++ b/README.md @@ -16,18 +16,18 @@ and is a core component of the [GitOps toolkit](https://fluxcd.io/flux/component ## APIs -| Kind | API Version | -|---------------------------------------------------------|------------------------------------| -| [GitRepository](docs/spec/v1/gitrepositories.md) | `source.toolkit.fluxcd.io/v1` | -| [OCIRepository](docs/spec/v1beta2/ocirepositories.md) | `source.toolkit.fluxcd.io/v1beta2` | -| [HelmRepository](docs/spec/v1beta2/helmrepositories.md) | `source.toolkit.fluxcd.io/v1beta2` | -| [HelmChart](docs/spec/v1beta2/helmcharts.md) | `source.toolkit.fluxcd.io/v1beta2` | -| [Bucket](docs/spec/v1beta2/buckets.md) | `source.toolkit.fluxcd.io/v1beta2` | +| Kind | API Version | +|----------------------------------------------------|-------------------------------| +| [GitRepository](docs/spec/v1/gitrepositories.md) | `source.toolkit.fluxcd.io/v1` 
| +| [OCIRepository](docs/spec/v1/ocirepositories.md) | `source.toolkit.fluxcd.io/v1` | +| [HelmRepository](docs/spec/v1/helmrepositories.md) | `source.toolkit.fluxcd.io/v1` | +| [HelmChart](docs/spec/v1/helmcharts.md) | `source.toolkit.fluxcd.io/v1` | +| [Bucket](docs/spec/v1/buckets.md) | `source.toolkit.fluxcd.io/v1` | ## Features * authenticates to sources (SSH, user/password, API token, Workload Identity) -* validates source authenticity (PGP, Cosign) +* validates source authenticity (PGP, Cosign, Notation) * detects source changes based on update policies (semver) * fetches resources on-demand and on-a-schedule * packages the fetched resources into a well-known format (tar.gz, yaml) diff --git a/api/go.mod b/api/go.mod index 66b0499bd..3d821f349 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,32 +1,34 @@ module github.com/fluxcd/source-controller/api -go 1.20 +go 1.25.0 require ( - github.com/fluxcd/pkg/apis/acl v0.1.0 - github.com/fluxcd/pkg/apis/meta v1.3.0 - k8s.io/apimachinery v0.28.6 - sigs.k8s.io/controller-runtime v0.16.3 + github.com/fluxcd/pkg/apis/acl v0.9.0 + github.com/fluxcd/pkg/apis/meta v1.21.0 + k8s.io/apimachinery v0.34.0 + sigs.k8s.io/controller-runtime v0.22.1 ) // Fix CVE-2022-28948 replace gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1 require ( - github.com/go-logr/logr v1.3.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kr/text v0.2.0 // indirect + github.com/kr/pretty v0.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - 
golang.org/x/net v0.20.0 // indirect - golang.org/x/text v0.14.0 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/x448/float16 v0.8.4 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/text v0.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/klog/v2 v2.110.1 // indirect - k8s.io/utils v0.0.0-20231127182322-b307cd553661 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) diff --git a/api/go.sum b/api/go.sum index e3556b12b..1aa815d66 100644 --- a/api/go.sum +++ b/api/go.sum @@ -2,27 +2,30 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fluxcd/pkg/apis/acl v0.1.0 h1:EoAl377hDQYL3WqanWCdifauXqXbMyFuK82NnX6pH4Q= -github.com/fluxcd/pkg/apis/acl v0.1.0/go.mod h1:zfEZzz169Oap034EsDhmCAGgnWlcWmIObZjYMusoXS8= -github.com/fluxcd/pkg/apis/meta v1.3.0 h1:KxeEc6olmSZvQ5pBONPE4IKxyoWQbqTJF1X6K5nIXpU= -github.com/fluxcd/pkg/apis/meta v1.3.0/go.mod h1:3Ui8xFkoU4sYehqmscjpq7NjqH2YN1A2iX2okbO3/yA= -github.com/go-logr/logr v1.3.0 
h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/fluxcd/pkg/apis/acl v0.9.0 h1:wBpgsKT+jcyZEcM//OmZr9RiF8klL3ebrDp2u2ThsnA= +github.com/fluxcd/pkg/apis/acl v0.9.0/go.mod h1:TttNS+gocsGLwnvmgVi3/Yscwqrjc17+vhgYfqkfrV4= +github.com/fluxcd/pkg/apis/meta v1.21.0 h1:R+bN02chcs0HUmyVDQhqe/FHmYLjipVDMLnyYfNX850= +github.com/fluxcd/pkg/apis/meta v1.21.0/go.mod h1:XUAEUgT4gkWDAEN79E141tmL+v4SV50tVZ/Ojpc/ueg= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -30,19 +33,31 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod 
h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -52,24 +67,26 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -79,21 +96,23 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= -k8s.io/apimachinery v0.28.6 h1:RsTeR4z6S07srPg6XYrwXpTJVMXsjPXn0ODakMytSW0= -k8s.io/apimachinery v0.28.6/go.mod h1:QFNX/kCl/EMT2WTSz8k4WLCv2XnkOLMaL8GAVRMdpsA= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= -k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod 
h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= -sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= +k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= +k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= +k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg= +sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= 
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/api/v1/artifact_types.go b/api/v1/artifact_types.go deleted file mode 100644 index 9342ecfa6..000000000 --- a/api/v1/artifact_types.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2023 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "path" - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Artifact represents the output of a Source reconciliation. -type Artifact struct { - // Path is the relative file path of the Artifact. It can be used to locate - // the file in the root of the Artifact storage on the local file system of - // the controller managing the Source. - // +required - Path string `json:"path"` - - // URL is the HTTP address of the Artifact as exposed by the controller - // managing the Source. It can be used to retrieve the Artifact for - // consumption, e.g. by another controller applying the Artifact contents. - // +required - URL string `json:"url"` - - // Revision is a human-readable identifier traceable in the origin source - // system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. 
- // +required - Revision string `json:"revision"` - - // Digest is the digest of the file in the form of ':'. - // +optional - // +kubebuilder:validation:Pattern="^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$" - Digest string `json:"digest,omitempty"` - - // LastUpdateTime is the timestamp corresponding to the last update of the - // Artifact. - // +required - LastUpdateTime metav1.Time `json:"lastUpdateTime"` - - // Size is the number of bytes in the file. - // +optional - Size *int64 `json:"size,omitempty"` - - // Metadata holds upstream information such as OCI annotations. - // +optional - Metadata map[string]string `json:"metadata,omitempty"` -} - -// HasRevision returns if the given revision matches the current Revision of -// the Artifact. -func (in *Artifact) HasRevision(revision string) bool { - if in == nil { - return false - } - return in.Revision == revision -} - -// HasDigest returns if the given digest matches the current Digest of the -// Artifact. -func (in *Artifact) HasDigest(digest string) bool { - if in == nil { - return false - } - return in.Digest == digest -} - -// ArtifactDir returns the artifact dir path in the form of -// '//'. -func ArtifactDir(kind, namespace, name string) string { - kind = strings.ToLower(kind) - return path.Join(kind, namespace, name) -} - -// ArtifactPath returns the artifact path in the form of -// '//name>/'. -func ArtifactPath(kind, namespace, name, filename string) string { - return path.Join(ArtifactDir(kind, namespace, name), filename) -} diff --git a/api/v1/bucket_types.go b/api/v1/bucket_types.go new file mode 100644 index 000000000..bbedcefb3 --- /dev/null +++ b/api/v1/bucket_types.go @@ -0,0 +1,281 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // BucketKind is the string representation of a Bucket. + BucketKind = "Bucket" +) + +const ( + // BucketProviderGeneric for any S3 API compatible storage Bucket. + BucketProviderGeneric string = "generic" + // BucketProviderAmazon for an AWS S3 object storage Bucket. + // Provides support for retrieving credentials from the AWS EC2 service + // and workload identity authentication. + BucketProviderAmazon string = "aws" + // BucketProviderGoogle for a Google Cloud Storage Bucket. + // Provides support for authentication using a workload identity. + BucketProviderGoogle string = "gcp" + // BucketProviderAzure for an Azure Blob Storage Bucket. + // Provides support for authentication using a Service Principal, + // Managed Identity or Shared Key. + BucketProviderAzure string = "azure" +) + +// BucketSpec specifies the required configuration to produce an Artifact for +// an object storage bucket. 
+// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers" +// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider" +// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider" +// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider" +// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider" +// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.serviceAccountName)", message="ServiceAccountName is not supported for the 'generic' Bucket provider" +// +kubebuilder:validation:XValidation:rule="!has(self.secretRef) || !has(self.serviceAccountName)", message="cannot set both .spec.secretRef and .spec.serviceAccountName" +type BucketSpec struct { + // Provider of the object storage bucket. + // Defaults to 'generic', which expects an S3 (API) compatible object + // storage. + // +kubebuilder:validation:Enum=generic;aws;gcp;azure + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` + + // BucketName is the name of the object storage bucket. + // +required + BucketName string `json:"bucketName"` + + // Endpoint is the object storage address the BucketName is located at. 
+ // +required + Endpoint string `json:"endpoint"` + + // STS specifies the required configuration to use a Security Token + // Service for fetching temporary credentials to authenticate in a + // Bucket provider. + // + // This field is only supported for the `aws` and `generic` providers. + // +optional + STS *BucketSTSSpec `json:"sts,omitempty"` + + // Insecure allows connecting to a non-TLS HTTP Endpoint. + // +optional + Insecure bool `json:"insecure,omitempty"` + + // Region of the Endpoint where the BucketName is located in. + // +optional + Region string `json:"region,omitempty"` + + // Prefix to use for server-side filtering of files in the Bucket. + // +optional + Prefix string `json:"prefix,omitempty"` + + // SecretRef specifies the Secret containing authentication credentials + // for the Bucket. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + // the bucket. This field is only supported for the 'gcp' and 'aws' providers. + // For more information about workload identity: + // https://fluxcd.io/flux/components/source/buckets/#workload-identity + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // bucket. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // This field is only supported for the `generic` provider. 
+ // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the Bucket server. + // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + + // Interval at which the Bucket Endpoint is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // Timeout for fetch operations, defaults to 60s. + // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. + // +optional + Ignore *string `json:"ignore,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // Bucket. + // +optional + Suspend bool `json:"suspend,omitempty"` +} + +// BucketSTSSpec specifies the required configuration to use a Security Token +// Service for fetching temporary credentials to authenticate in a Bucket +// provider. +type BucketSTSSpec struct { + // Provider of the Security Token Service. + // +kubebuilder:validation:Enum=aws;ldap + // +required + Provider string `json:"provider"` + + // Endpoint is the HTTP/S endpoint of the Security Token Service from + // where temporary credentials will be fetched. 
+ // +required + // +kubebuilder:validation:Pattern="^(http|https)://.*$" + Endpoint string `json:"endpoint"` + + // SecretRef specifies the Secret containing authentication credentials + // for the STS endpoint. This Secret must contain the fields `username` + // and `password` and is supported only for the `ldap` provider. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // STS endpoint. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // This field is only supported for the `ldap` provider. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` +} + +// BucketStatus records the observed state of a Bucket. +type BucketStatus struct { + // ObservedGeneration is the last observed generation of the Bucket object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the Bucket. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the dynamic fetch link for the latest Artifact. + // It is provided on a "best effort" basis, and using the precise + // BucketStatus.Artifact data is recommended. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the last successful Bucket reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. 
+ // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // BucketOperationSucceededReason signals that the Bucket listing and fetch + // operations succeeded. + BucketOperationSucceededReason string = "BucketOperationSucceeded" + + // BucketOperationFailedReason signals that the Bucket listing or fetch + // operations failed. + BucketOperationFailedReason string = "BucketOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in *Bucket) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *Bucket) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in *Bucket) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. +func (in *Bucket) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// Bucket is the Schema for the buckets API. 
+type Bucket struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec BucketSpec `json:"spec,omitempty"`
+	// +kubebuilder:default={"observedGeneration":-1}
+	Status BucketStatus `json:"status,omitempty"`
+}
+
+// BucketList contains a list of Bucket objects.
+// +kubebuilder:object:root=true
+type BucketList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []Bucket `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&Bucket{}, &BucketList{})
+}
diff --git a/api/v1/condition_types.go b/api/v1/condition_types.go
index 72c7e67a2..9641db99c 100644
--- a/api/v1/condition_types.go
+++ b/api/v1/condition_types.go
@@ -108,4 +108,11 @@ const (
 	// PatchOperationFailedReason signals a failure in patching a kubernetes API
 	// object.
 	PatchOperationFailedReason string = "PatchOperationFailed"
+
+	// InvalidSTSConfigurationReason signals that the STS configuration is invalid.
+	InvalidSTSConfigurationReason string = "InvalidSTSConfiguration"
+
+	// InvalidProviderConfigurationReason signals that the provider
+	// configuration is invalid.
+	InvalidProviderConfigurationReason string = "InvalidProviderConfiguration"
 )
diff --git a/api/v1/externalartifact_types.go b/api/v1/externalartifact_types.go
new file mode 100644
index 000000000..e338b733b
--- /dev/null
+++ b/api/v1/externalartifact_types.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2025 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +// ExternalArtifactKind is the string representation of the ExternalArtifact. +const ExternalArtifactKind = "ExternalArtifact" + +// ExternalArtifactSpec defines the desired state of ExternalArtifact +type ExternalArtifactSpec struct { + // SourceRef points to the Kubernetes custom resource for + // which the artifact is generated. + // +optional + SourceRef *meta.NamespacedObjectKindReference `json:"sourceRef,omitempty"` +} + +// ExternalArtifactStatus defines the observed state of ExternalArtifact +type ExternalArtifactStatus struct { + // Artifact represents the output of an ExternalArtifact reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // Conditions holds the conditions for the ExternalArtifact. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// GetConditions returns the status conditions of the object. +func (in *ExternalArtifact) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *ExternalArtifact) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetArtifact returns the latest Artifact from the ExternalArtifact if +// present in the status sub-resource. +func (in *ExternalArtifact) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetRequeueAfter returns the duration after which the ExternalArtifact +// must be reconciled again. 
+func (in *ExternalArtifact) GetRequeueAfter() time.Duration { + return time.Minute +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Source",type="string",JSONPath=".spec.sourceRef.name",description="" + +// ExternalArtifact is the Schema for the external artifacts API +type ExternalArtifact struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ExternalArtifactSpec `json:"spec,omitempty"` + Status ExternalArtifactStatus `json:"status,omitempty"` +} + +// ExternalArtifactList contains a list of ExternalArtifact +// +kubebuilder:object:root=true +type ExternalArtifactList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExternalArtifact `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ExternalArtifact{}, &ExternalArtifactList{}) +} diff --git a/api/v1/gitrepository_types.go b/api/v1/gitrepository_types.go index 0d3b3abea..f104fd0f1 100644 --- a/api/v1/gitrepository_types.go +++ b/api/v1/gitrepository_types.go @@ -27,6 +27,18 @@ import ( const ( // GitRepositoryKind is the string representation of a GitRepository. GitRepositoryKind = "GitRepository" + + // GitProviderGeneric provides support for authentication using + // credentials specified in secretRef. + GitProviderGeneric string = "generic" + + // GitProviderAzure provides support for authentication to azure + // repositories using Managed Identity. 
+ GitProviderAzure string = "azure" + + // GitProviderGitHub provides support for authentication to git + // repositories using GitHub App authentication + GitProviderGitHub string = "github" ) const ( @@ -65,6 +77,7 @@ const ( // GitRepositorySpec specifies the required configuration to produce an // Artifact for a Git repository. +// +kubebuilder:validation:XValidation:rule="!has(self.serviceAccountName) || (has(self.provider) && self.provider == 'azure')",message="serviceAccountName can only be set when provider is 'azure'" type GitRepositorySpec struct { // URL specifies the Git repository URL, it can be an HTTP/S or SSH address. // +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$" @@ -80,6 +93,17 @@ type GitRepositorySpec struct { // +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + // Provider used for authentication, can be 'azure', 'github', 'generic'. + // When not specified, defaults to 'generic'. + // +kubebuilder:validation:Enum=generic;azure;github + // +optional + Provider string `json:"provider,omitempty"` + + // ServiceAccountName is the name of the Kubernetes ServiceAccount used to + // authenticate to the GitRepository. This field is only supported for 'azure' provider. + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + // Interval at which the GitRepository URL is checked for updates. // This interval is approximate and may be subject to jitter to ensure // efficient use of resources. @@ -130,6 +154,12 @@ type GitRepositorySpec struct { // should be included in the Artifact produced for this GitRepository. // +optional Include []GitRepositoryInclude `json:"include,omitempty"` + + // SparseCheckout specifies a list of directories to checkout when cloning + // the repository. If specified, only these directories are included in the + // Artifact produced for this GitRepository. 
+ // +optional + SparseCheckout []string `json:"sparseCheckout,omitempty"` } // GitRepositoryInclude specifies a local reference to a GitRepository which @@ -226,12 +256,12 @@ type GitRepositoryStatus struct { // Artifact represents the last successful GitRepository reconciliation. // +optional - Artifact *Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` // IncludedArtifacts contains a list of the last successfully included // Artifacts as instructed by GitRepositorySpec.Include. // +optional - IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"` + IncludedArtifacts []*meta.Artifact `json:"includedArtifacts,omitempty"` // ObservedIgnore is the observed exclusion patterns used for constructing // the source artifact. @@ -248,6 +278,11 @@ type GitRepositoryStatus struct { // +optional ObservedInclude []GitRepositoryInclude `json:"observedInclude,omitempty"` + // ObservedSparseCheckout is the observed list of directories used to + // produce the current Artifact. + // +optional + ObservedSparseCheckout []string `json:"observedSparseCheckout,omitempty"` + // SourceVerificationMode is the last used verification mode indicating // which Git object(s) have been verified. // +optional @@ -284,10 +319,18 @@ func (in GitRepository) GetRequeueAfter() time.Duration { // GetArtifact returns the latest Artifact from the GitRepository if present in // the status sub-resource. -func (in *GitRepository) GetArtifact() *Artifact { +func (in *GitRepository) GetArtifact() *meta.Artifact { return in.Status.Artifact } +// GetProvider returns the Git authentication provider. +func (v *GitRepository) GetProvider() string { + if v.Spec.Provider == "" { + return GitProviderGeneric + } + return v.Spec.Provider +} + // GetMode returns the declared GitVerificationMode, or a ModeGitHEAD default. 
func (v *GitRepositoryVerification) GetMode() GitVerificationMode { if v.Mode.Valid() { diff --git a/api/v1/helmchart_types.go b/api/v1/helmchart_types.go new file mode 100644 index 000000000..23cb24146 --- /dev/null +++ b/api/v1/helmchart_types.go @@ -0,0 +1,227 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +// HelmChartKind is the string representation of a HelmChart. +const HelmChartKind = "HelmChart" + +// HelmChartSpec specifies the desired state of a Helm chart. +type HelmChartSpec struct { + // Chart is the name or path the Helm chart is available at in the + // SourceRef. + // +required + Chart string `json:"chart"` + + // Version is the chart version semver expression, ignored for charts from + // GitRepository and Bucket sources. Defaults to latest when omitted. + // +kubebuilder:default:=* + // +optional + Version string `json:"version,omitempty"` + + // SourceRef is the reference to the Source the chart is available at. + // +required + SourceRef LocalHelmChartSourceReference `json:"sourceRef"` + + // Interval at which the HelmChart SourceRef is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. 
+ // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // ReconcileStrategy determines what enables the creation of a new artifact. + // Valid values are ('ChartVersion', 'Revision'). + // See the documentation of the values for an explanation on their behavior. + // Defaults to ChartVersion when omitted. + // +kubebuilder:validation:Enum=ChartVersion;Revision + // +kubebuilder:default:=ChartVersion + // +optional + ReconcileStrategy string `json:"reconcileStrategy,omitempty"` + + // ValuesFiles is an alternative list of values files to use as the chart + // values (values.yaml is not included by default), expected to be a + // relative path in the SourceRef. + // Values files are merged in the order of this list with the last file + // overriding the first. Ignored when omitted. + // +optional + ValuesFiles []string `json:"valuesFiles,omitempty"` + + // IgnoreMissingValuesFiles controls whether to silently ignore missing values + // files rather than failing. + // +optional + IgnoreMissingValuesFiles bool `json:"ignoreMissingValuesFiles,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // source. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // Verify contains the secret name containing the trusted public keys + // used to verify the signature and specifies which provider to use to check + // whether OCI image is authentic. + // This field is only supported when using HelmRepository source with spec.type 'oci'. + // Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified. + // +optional + Verify *OCIRepositoryVerification `json:"verify,omitempty"` +} + +const ( + // ReconcileStrategyChartVersion reconciles when the version of the Helm chart is different. 
+ ReconcileStrategyChartVersion string = "ChartVersion" + + // ReconcileStrategyRevision reconciles when the Revision of the source is different. + ReconcileStrategyRevision string = "Revision" +) + +// LocalHelmChartSourceReference contains enough information to let you locate +// the typed referenced object at namespace level. +type LocalHelmChartSourceReference struct { + // APIVersion of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + + // Kind of the referent, valid values are ('HelmRepository', 'GitRepository', + // 'Bucket'). + // +kubebuilder:validation:Enum=HelmRepository;GitRepository;Bucket + // +required + Kind string `json:"kind"` + + // Name of the referent. + // +required + Name string `json:"name"` +} + +// HelmChartStatus records the observed state of the HelmChart. +type HelmChartStatus struct { + // ObservedGeneration is the last observed generation of the HelmChart + // object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // ObservedSourceArtifactRevision is the last observed Artifact.Revision + // of the HelmChartSpec.SourceRef. + // +optional + ObservedSourceArtifactRevision string `json:"observedSourceArtifactRevision,omitempty"` + + // ObservedChartName is the last observed chart name as specified by the + // resolved chart reference. + // +optional + ObservedChartName string `json:"observedChartName,omitempty"` + + // ObservedValuesFiles are the observed value files of the last successful + // reconciliation. + // It matches the chart in the last successfully reconciled artifact. + // +optional + ObservedValuesFiles []string `json:"observedValuesFiles,omitempty"` + + // Conditions holds the conditions for the HelmChart. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the dynamic fetch link for the latest Artifact. + // It is provided on a "best effort" basis, and using the precise + // HelmChartStatus.Artifact data is recommended.
+ // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // ChartPullSucceededReason signals that the pull of the Helm chart + // succeeded. + ChartPullSucceededReason string = "ChartPullSucceeded" + + // ChartPackageSucceededReason signals that the package of the Helm + // chart succeeded. + ChartPackageSucceededReason string = "ChartPackageSucceeded" +) + +// GetConditions returns the status conditions of the object. +func (in HelmChart) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmChart) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be +// reconciled again. +func (in HelmChart) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest artifact from the source if present in the +// status sub-resource. +func (in *HelmChart) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetValuesFiles returns a merged list of HelmChartSpec.ValuesFiles. 
+func (in *HelmChart) GetValuesFiles() []string { + return in.Spec.ValuesFiles +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=hc +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart` +// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version` +// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind` +// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// HelmChart is the Schema for the helmcharts API. +type HelmChart struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HelmChartSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status HelmChartStatus `json:"status,omitempty"` +} + +// HelmChartList contains a list of HelmChart objects. +// +kubebuilder:object:root=true +type HelmChartList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HelmChart `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HelmChart{}, &HelmChartList{}) +} diff --git a/api/v1/helmrepository_types.go b/api/v1/helmrepository_types.go new file mode 100644 index 000000000..1c19064a5 --- /dev/null +++ b/api/v1/helmrepository_types.go @@ -0,0 +1,228 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // HelmRepositoryKind is the string representation of a HelmRepository. + HelmRepositoryKind = "HelmRepository" + // HelmRepositoryURLIndexKey is the key used for indexing HelmRepository + // objects by their HelmRepositorySpec.URL. + HelmRepositoryURLIndexKey = ".metadata.helmRepositoryURL" + // HelmRepositoryTypeDefault is the default HelmRepository type. + // It is used when no type is specified and corresponds to a Helm repository. + HelmRepositoryTypeDefault = "default" + // HelmRepositoryTypeOCI is the type for an OCI repository. + HelmRepositoryTypeOCI = "oci" +) + +// HelmRepositorySpec specifies the required configuration to produce an +// Artifact for a Helm repository index YAML. +type HelmRepositorySpec struct { + // URL of the Helm repository, a valid URL contains at least a protocol and + // host. + // +kubebuilder:validation:Pattern="^(http|https|oci)://.*$" + // +required + URL string `json:"url"` + + // SecretRef specifies the Secret containing authentication credentials + // for the HelmRepository. + // For HTTP/S basic auth the secret must contain 'username' and 'password' + // fields. + // Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile' + // keys is deprecated. Please use `.spec.certSecretRef` instead. 
+ // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // registry. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // It takes precedence over the values specified in the Secret referred + // to by `.spec.secretRef`. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // PassCredentials allows the credentials from the SecretRef to be passed + // on to a host that does not match the host as defined in URL. + // This may be required if the host of the advertised chart URLs in the + // index differ from the defined URL. + // Enabling this should be done with caution, as it can potentially result + // in credentials getting stolen in a MITM-attack. + // +optional + PassCredentials bool `json:"passCredentials,omitempty"` + + // Interval at which the HelmRepository URL is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +optional + Interval metav1.Duration `json:"interval,omitempty"` + + // Insecure allows connecting to a non-TLS HTTP container registry. + // This field is only taken into account if the .spec.type field is set to 'oci'. 
+ // +optional + Insecure bool `json:"insecure,omitempty"` + + // Timeout is used for the index fetch operation for an HTTPS helm repository, + // and for remote OCI Repository operations like pulling for an OCI helm + // chart by the associated HelmChart. + // Its default value is 60s. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // HelmRepository. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // AccessFrom specifies an Access Control List for allowing cross-namespace + // references to this object. + // NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` + + // Type of the HelmRepository. + // When this field is set to "oci", the URL field value must be prefixed with "oci://". + // +kubebuilder:validation:Enum=default;oci + // +optional + Type string `json:"type,omitempty"` + + // Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + // This field is optional, and only taken into account if the .spec.type field is set to 'oci'. + // When not specified, defaults to 'generic'. + // +kubebuilder:validation:Enum=generic;aws;azure;gcp + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` +} + +// HelmRepositoryStatus records the observed state of the HelmRepository. +type HelmRepositoryStatus struct { + // ObservedGeneration is the last observed generation of the HelmRepository + // object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the HelmRepository. 
+ // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the dynamic fetch link for the latest Artifact. + // It is provided on a "best effort" basis, and using the precise + // HelmRepositoryStatus.Artifact data is recommended. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the last successful HelmRepository reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // IndexationFailedReason signals that the HelmRepository index fetch + // failed. + IndexationFailedReason string = "IndexationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in HelmRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be +// reconciled again. +func (in HelmRepository) GetRequeueAfter() time.Duration { + if in.Spec.Interval.Duration != 0 { + return in.Spec.Interval.Duration + } + return time.Minute +} + +// GetTimeout returns the timeout duration used for various operations related +// to this HelmRepository. +func (in HelmRepository) GetTimeout() time.Duration { + if in.Spec.Timeout != nil { + return in.Spec.Timeout.Duration + } + return time.Minute +} + +// GetArtifact returns the latest artifact from the source if present in the +// status sub-resource. 
+func (in *HelmRepository) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=helmrepo +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// HelmRepository is the Schema for the helmrepositories API. +type HelmRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HelmRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status HelmRepositoryStatus `json:"status,omitempty"` +} + +// HelmRepositoryList contains a list of HelmRepository objects. +// +kubebuilder:object:root=true +type HelmRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HelmRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HelmRepository{}, &HelmRepositoryList{}) +} diff --git a/api/v1/ocirepository_types.go b/api/v1/ocirepository_types.go new file mode 100644 index 000000000..8c4d3f0fc --- /dev/null +++ b/api/v1/ocirepository_types.go @@ -0,0 +1,296 @@ +/* +Copyright 2025 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // OCIRepositoryKind is the string representation of an OCIRepository. + OCIRepositoryKind = "OCIRepository" + + // OCIRepositoryPrefix is the prefix used for OCIRepository URLs. + OCIRepositoryPrefix = "oci://" + + // GenericOCIProvider provides support for authentication using static credentials + // for any OCI compatible API such as Docker Registry, GitHub Container Registry, + // Docker Hub, Quay, etc. + GenericOCIProvider string = "generic" + + // AmazonOCIProvider provides support for OCI authentication using AWS IRSA. + AmazonOCIProvider string = "aws" + + // GoogleOCIProvider provides support for OCI authentication using GCP workload identity. + GoogleOCIProvider string = "gcp" + + // AzureOCIProvider provides support for OCI authentication using an Azure Service Principal, + // Managed Identity or Shared Key. + AzureOCIProvider string = "azure" + + // OCILayerExtract defines the operation type for extracting the content from an OCI artifact layer. + OCILayerExtract = "extract" + + // OCILayerCopy defines the operation type for copying the content from an OCI artifact layer. + OCILayerCopy = "copy" +) + +// OCIRepositorySpec defines the desired state of OCIRepository +type OCIRepositorySpec struct { + // URL is a reference to an OCI artifact repository hosted + // on a remote container registry.
+ // +kubebuilder:validation:Pattern="^oci://.*$" + // +required + URL string `json:"url"` + + // The OCI reference to pull and monitor for changes, + // defaults to the latest tag. + // +optional + Reference *OCIRepositoryRef `json:"ref,omitempty"` + + // LayerSelector specifies which layer should be extracted from the OCI artifact. + // When not specified, the first layer found in the artifact is selected. + // +optional + LayerSelector *OCILayerSelector `json:"layerSelector,omitempty"` + + // The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + // When not specified, defaults to 'generic'. + // +kubebuilder:validation:Enum=generic;aws;azure;gcp + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` + + // SecretRef contains the secret name containing the registry login + // credentials to resolve image metadata. + // The secret must be of type kubernetes.io/dockerconfigjson. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // Verify contains the secret name containing the trusted public keys + // used to verify the signature and specifies which provider to use to check + // whether OCI image is authentic. + // +optional + Verify *OCIRepositoryVerification `json:"verify,omitempty"` + + // ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + // the image pull if the service account has attached pull secrets. 
For more information: + // https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // registry. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the container registry. + // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + + // Interval at which the OCIRepository URL is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // The timeout for remote OCI Repository operations like pulling, defaults to 60s. + // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. 
+ // +optional + Ignore *string `json:"ignore,omitempty"` + + // Insecure allows connecting to a non-TLS HTTP container registry. + // +optional + Insecure bool `json:"insecure,omitempty"` + + // This flag tells the controller to suspend the reconciliation of this source. + // +optional + Suspend bool `json:"suspend,omitempty"` +} + +// OCIRepositoryRef defines the image reference for the OCIRepository's URL +type OCIRepositoryRef struct { + // Digest is the image digest to pull, takes precedence over SemVer. + // The value should be in the format 'sha256:'. + // +optional + Digest string `json:"digest,omitempty"` + + // SemVer is the range of tags to pull selecting the latest within + // the range, takes precedence over Tag. + // +optional + SemVer string `json:"semver,omitempty"` + + // SemverFilter is a regex pattern to filter the tags within the SemVer range. + // +optional + SemverFilter string `json:"semverFilter,omitempty"` + + // Tag is the image tag to pull, defaults to latest. + // +optional + Tag string `json:"tag,omitempty"` +} + +// OCILayerSelector specifies which layer should be extracted from an OCI Artifact +type OCILayerSelector struct { + // MediaType specifies the OCI media type of the layer + // which should be extracted from the OCI Artifact. The + // first layer matching this type is selected. + // +optional + MediaType string `json:"mediaType,omitempty"` + + // Operation specifies how the selected layer should be processed. + // By default, the layer compressed content is extracted to storage. + // When the operation is set to 'copy', the layer compressed content + // is persisted to storage as it is. + // +kubebuilder:validation:Enum=extract;copy + // +optional + Operation string `json:"operation,omitempty"` +} + +// OCIRepositoryStatus defines the observed state of OCIRepository +type OCIRepositoryStatus struct { + // ObservedGeneration is the last observed generation. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the OCIRepository. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the download link for the artifact output of the last OCI Repository sync. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful OCI Repository sync. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. + // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + // ObservedLayerSelector is the observed layer selector used for constructing + // the source artifact. + // +optional + ObservedLayerSelector *OCILayerSelector `json:"observedLayerSelector,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // OCIPullFailedReason signals that a pull operation failed. + OCIPullFailedReason string = "OCIArtifactPullFailed" + + // OCILayerOperationFailedReason signals that an OCI layer operation failed. + OCILayerOperationFailedReason string = "OCIArtifactLayerOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in OCIRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *OCIRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the OCIRepository must be +// reconciled again. +func (in OCIRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest Artifact from the OCIRepository if present in +// the status sub-resource. 
+func (in *OCIRepository) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetLayerMediaType returns the media type layer selector if found in spec. +func (in *OCIRepository) GetLayerMediaType() string { + if in.Spec.LayerSelector == nil { + return "" + } + + return in.Spec.LayerSelector.MediaType +} + +// GetLayerOperation returns the layer selector operation (defaults to extract). +func (in *OCIRepository) GetLayerOperation() string { + if in.Spec.LayerSelector == nil || in.Spec.LayerSelector.Operation == "" { + return OCILayerExtract + } + + return in.Spec.LayerSelector.Operation +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=ocirepo +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// OCIRepository is the Schema for the ocirepositories API +type OCIRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OCIRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status OCIRepositoryStatus `json:"status,omitempty"` +} + +// OCIRepositoryList contains a list of OCIRepository +// +kubebuilder:object:root=true +type OCIRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OCIRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OCIRepository{}, &OCIRepositoryList{}) +} diff --git a/api/v1/ociverification_types.go b/api/v1/ociverification_types.go new file mode 100644 index 
000000000..de74be343 --- /dev/null +++ b/api/v1/ociverification_types.go @@ -0,0 +1,56 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "github.com/fluxcd/pkg/apis/meta" +) + +// OCIRepositoryVerification verifies the authenticity of an OCI Artifact +type OCIRepositoryVerification struct { + // Provider specifies the technology used to sign the OCI Artifact. + // +kubebuilder:validation:Enum=cosign;notation + // +kubebuilder:default:=cosign + Provider string `json:"provider"` + + // SecretRef specifies the Kubernetes Secret containing the + // trusted public keys. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // MatchOIDCIdentity specifies the identity matching criteria to use + // while verifying an OCI artifact which was signed using Cosign keyless + // signing. The artifact's identity is deemed to be verified if any of the + // specified matchers match against the identity. + // +optional + MatchOIDCIdentity []OIDCIdentityMatch `json:"matchOIDCIdentity,omitempty"` +} + +// OIDCIdentityMatch specifies options for verifying the certificate identity, +// i.e. the issuer and the subject of the certificate. +type OIDCIdentityMatch struct { + // Issuer specifies the regex pattern to match against to verify + // the OIDC issuer in the Fulcio certificate. The pattern must be a + // valid Go regular expression. 
+ // +required + Issuer string `json:"issuer"` + // Subject specifies the regex pattern to match against to verify + // the identity subject in the Fulcio certificate. The pattern must + // be a valid Go regular expression. + // +required + Subject string `json:"subject"` +} diff --git a/api/v1/source.go b/api/v1/source.go index 83040bc22..d879f6034 100644 --- a/api/v1/source.go +++ b/api/v1/source.go @@ -20,6 +20,8 @@ import ( "time" "k8s.io/apimachinery/pkg/runtime" + + "github.com/fluxcd/pkg/apis/meta" ) const ( @@ -41,5 +43,5 @@ type Source interface { GetRequeueAfter() time.Duration // GetArtifact returns the latest artifact from the source if present in // the status sub-resource. - GetArtifact() *Artifact + GetArtifact() *meta.Artifact } diff --git a/api/v1/sts_types.go b/api/v1/sts_types.go new file mode 100644 index 000000000..4b1d05881 --- /dev/null +++ b/api/v1/sts_types.go @@ -0,0 +1,26 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // STSProviderAmazon represents the AWS provider for Security Token Service. + // Provides support for fetching temporary credentials from an AWS STS endpoint. + STSProviderAmazon string = "aws" + // STSProviderLDAP represents the LDAP provider for Security Token Service. + // Provides support for fetching temporary credentials from an LDAP endpoint. 
+ STSProviderLDAP string = "ldap" +) diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 8167c7136..14f1ba3c2 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1,8 +1,7 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* -Copyright 2023 The Flux authors +Copyright 2025 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,35 +21,277 @@ limitations under the License. package v1 import ( + "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Artifact) DeepCopyInto(out *Artifact) { +func (in *Bucket) DeepCopyInto(out *Bucket) { *out = *in - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) - if in.Size != nil { - in, out := &in.Size, &out.Size - *out = new(int64) + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket. +func (in *Bucket) DeepCopy() *Bucket { + if in == nil { + return nil + } + out := new(Bucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Bucket) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketList) DeepCopyInto(out *BucketList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Bucket, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList. +func (in *BucketList) DeepCopy() *BucketList { + if in == nil { + return nil + } + out := new(BucketList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec. +func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec { + if in == nil { + return nil + } + out := new(BucketSTSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { + *out = *in + if in.STS != nil { + in, out := &in.STS, &out.STS + *out = new(BucketSTSSpec) + (*in).DeepCopyInto(*out) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec. +func (in *BucketSpec) DeepCopy() *BucketSpec { + if in == nil { + return nil + } + out := new(BucketSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus. 
+func (in *BucketStatus) DeepCopy() *BucketStatus { + if in == nil { + return nil + } + out := new(BucketStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalArtifact) DeepCopyInto(out *ExternalArtifact) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifact. +func (in *ExternalArtifact) DeepCopy() *ExternalArtifact { + if in == nil { + return nil + } + out := new(ExternalArtifact) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalArtifact) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalArtifactList) DeepCopyInto(out *ExternalArtifactList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ExternalArtifact, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactList. +func (in *ExternalArtifactList) DeepCopy() *ExternalArtifactList { + if in == nil { + return nil + } + out := new(ExternalArtifactList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ExternalArtifactList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalArtifactSpec) DeepCopyInto(out *ExternalArtifactSpec) { + *out = *in + if in.SourceRef != nil { + in, out := &in.SourceRef, &out.SourceRef + *out = new(meta.NamespacedObjectKindReference) **out = **in } - if in.Metadata != nil { - in, out := &in.Metadata, &out.Metadata - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactSpec. +func (in *ExternalArtifactSpec) DeepCopy() *ExternalArtifactSpec { + if in == nil { + return nil + } + out := new(ExternalArtifactSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalArtifactStatus) DeepCopyInto(out *ExternalArtifactStatus) { + *out = *in + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact. -func (in *Artifact) DeepCopy() *Artifact { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactStatus. 
+func (in *ExternalArtifactStatus) DeepCopy() *ExternalArtifactStatus { if in == nil { return nil } - out := new(Artifact) + out := new(ExternalArtifactStatus) in.DeepCopyInto(out) return out } @@ -184,6 +425,11 @@ func (in *GitRepositorySpec) DeepCopyInto(out *GitRepositorySpec) { *out = make([]GitRepositoryInclude, len(*in)) copy(*out, *in) } + if in.SparseCheckout != nil { + in, out := &in.SparseCheckout, &out.SparseCheckout + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositorySpec. @@ -208,16 +454,16 @@ func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } if in.IncludedArtifacts != nil { in, out := &in.IncludedArtifacts, &out.IncludedArtifacts - *out = make([]*Artifact, len(*in)) + *out = make([]*meta.Artifact, len(*in)) for i := range *in { if (*in)[i] != nil { in, out := &(*in)[i], &(*out)[i] - *out = new(Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } } @@ -232,6 +478,11 @@ func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) { *out = make([]GitRepositoryInclude, len(*in)) copy(*out, *in) } + if in.ObservedSparseCheckout != nil { + in, out := &in.ObservedSparseCheckout, &out.ObservedSparseCheckout + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.SourceVerificationMode != nil { in, out := &in.SourceVerificationMode, &out.SourceVerificationMode *out = new(GitVerificationMode) @@ -265,3 +516,483 @@ func (in *GitRepositoryVerification) DeepCopy() *GitRepositoryVerification { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmChart) DeepCopyInto(out *HelmChart) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChart. +func (in *HelmChart) DeepCopy() *HelmChart { + if in == nil { + return nil + } + out := new(HelmChart) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmChart) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartList) DeepCopyInto(out *HelmChartList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmChart, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartList. +func (in *HelmChartList) DeepCopy() *HelmChartList { + if in == nil { + return nil + } + out := new(HelmChartList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmChartList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) { + *out = *in + out.SourceRef = in.SourceRef + out.Interval = in.Interval + if in.ValuesFiles != nil { + in, out := &in.ValuesFiles, &out.ValuesFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Verify != nil { + in, out := &in.Verify, &out.Verify + *out = new(OCIRepositoryVerification) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec. +func (in *HelmChartSpec) DeepCopy() *HelmChartSpec { + if in == nil { + return nil + } + out := new(HelmChartSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) { + *out = *in + if in.ObservedValuesFiles != nil { + in, out := &in.ObservedValuesFiles, &out.ObservedValuesFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartStatus. +func (in *HelmChartStatus) DeepCopy() *HelmChartStatus { + if in == nil { + return nil + } + out := new(HelmChartStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmRepository) DeepCopyInto(out *HelmRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepository. +func (in *HelmRepository) DeepCopy() *HelmRepository { + if in == nil { + return nil + } + out := new(HelmRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepositoryList) DeepCopyInto(out *HelmRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryList. +func (in *HelmRepositoryList) DeepCopy() *HelmRepositoryList { + if in == nil { + return nil + } + out := new(HelmRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositorySpec. +func (in *HelmRepositorySpec) DeepCopy() *HelmRepositorySpec { + if in == nil { + return nil + } + out := new(HelmRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryStatus. +func (in *HelmRepositoryStatus) DeepCopy() *HelmRepositoryStatus { + if in == nil { + return nil + } + out := new(HelmRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LocalHelmChartSourceReference) DeepCopyInto(out *LocalHelmChartSourceReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalHelmChartSourceReference. +func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReference { + if in == nil { + return nil + } + out := new(LocalHelmChartSourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCILayerSelector) DeepCopyInto(out *OCILayerSelector) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCILayerSelector. +func (in *OCILayerSelector) DeepCopy() *OCILayerSelector { + if in == nil { + return nil + } + out := new(OCILayerSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepository) DeepCopyInto(out *OCIRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepository. +func (in *OCIRepository) DeepCopy() *OCIRepository { + if in == nil { + return nil + } + out := new(OCIRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OCIRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OCIRepositoryList) DeepCopyInto(out *OCIRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OCIRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryList. +func (in *OCIRepositoryList) DeepCopy() *OCIRepositoryList { + if in == nil { + return nil + } + out := new(OCIRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OCIRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepositoryRef) DeepCopyInto(out *OCIRepositoryRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryRef. +func (in *OCIRepositoryRef) DeepCopy() *OCIRepositoryRef { + if in == nil { + return nil + } + out := new(OCIRepositoryRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) { + *out = *in + if in.Reference != nil { + in, out := &in.Reference, &out.Reference + *out = new(OCIRepositoryRef) + **out = **in + } + if in.LayerSelector != nil { + in, out := &in.LayerSelector, &out.LayerSelector + *out = new(OCILayerSelector) + **out = **in + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.Verify != nil { + in, out := &in.Verify, &out.Verify + *out = new(OCIRepositoryVerification) + (*in).DeepCopyInto(*out) + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositorySpec. +func (in *OCIRepositorySpec) DeepCopy() *OCIRepositorySpec { + if in == nil { + return nil + } + out := new(OCIRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + if in.ObservedLayerSelector != nil { + in, out := &in.ObservedLayerSelector, &out.ObservedLayerSelector + *out = new(OCILayerSelector) + **out = **in + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryStatus. +func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus { + if in == nil { + return nil + } + out := new(OCIRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepositoryVerification) DeepCopyInto(out *OCIRepositoryVerification) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.MatchOIDCIdentity != nil { + in, out := &in.MatchOIDCIdentity, &out.MatchOIDCIdentity + *out = make([]OIDCIdentityMatch, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryVerification. +func (in *OCIRepositoryVerification) DeepCopy() *OCIRepositoryVerification { + if in == nil { + return nil + } + out := new(OCIRepositoryVerification) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OIDCIdentityMatch) DeepCopyInto(out *OIDCIdentityMatch) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCIdentityMatch. +func (in *OIDCIdentityMatch) DeepCopy() *OIDCIdentityMatch { + if in == nil { + return nil + } + out := new(OIDCIdentityMatch) + in.DeepCopyInto(out) + return out +} diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 2b717547a..e64321c9d 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -194,11 +194,7 @@ func (in *Bucket) GetInterval() metav1.Duration { // +genclient // +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:skipversion // Bucket is the Schema for the buckets API type Bucket struct { diff --git a/api/v1beta1/doc.go b/api/v1beta1/doc.go index 7a768a45d..f604a2624 100644 --- a/api/v1beta1/doc.go +++ b/api/v1beta1/doc.go @@ -15,6 +15,9 @@ limitations under the License. */ // Package v1beta1 contains API Schema definitions for the source v1beta1 API group +// +// Deprecated: v1beta1 is no longer supported, use v1 instead. 
+// // +kubebuilder:object:generate=true // +groupName=source.toolkit.fluxcd.io package v1beta1 diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index 8a4c46fe8..05cce7c60 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -267,12 +267,7 @@ func (in *GitRepository) GetInterval() metav1.Duration { // +genclient // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=gitrepo -// +kubebuilder:subresource:status -// +kubebuilder:deprecatedversion:warning="v1beta1 GitRepository is deprecated, upgrade to v1" -// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:skipversion // GitRepository is the Schema for the gitrepositories API type GitRepository struct { diff --git a/api/v1beta1/helmchart_types.go b/api/v1beta1/helmchart_types.go index 74bcc2c12..22e5dda58 100644 --- a/api/v1beta1/helmchart_types.go +++ b/api/v1beta1/helmchart_types.go @@ -233,14 +233,7 @@ func (in *HelmChart) GetValuesFiles() []string { // +genclient // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=hc -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart` -// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version` -// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind` -// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name` -// 
+kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:skipversion // HelmChart is the Schema for the helmcharts API type HelmChart struct { diff --git a/api/v1beta1/helmrepository_types.go b/api/v1beta1/helmrepository_types.go index 9151ff253..4530b82a9 100644 --- a/api/v1beta1/helmrepository_types.go +++ b/api/v1beta1/helmrepository_types.go @@ -183,11 +183,7 @@ func (in *HelmRepository) GetInterval() metav1.Duration { // +genclient // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=helmrepo -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:skipversion // HelmRepository is the Schema for the helmrepositories API type HelmRepository struct { diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index fd3252bf3..10be7301e 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -1,8 +1,7 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* -Copyright 2023 The Flux authors +Copyright 2025 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go index 5d3d9c7d0..6495abdd0 100644 --- a/api/v1beta2/bucket_types.go +++ b/api/v1beta2/bucket_types.go @@ -33,22 +33,48 @@ const ( ) const ( + // BucketProviderGeneric for any S3 API compatible storage Bucket. + BucketProviderGeneric string = apiv1.BucketProviderGeneric + // BucketProviderAmazon for an AWS S3 object storage Bucket. + // Provides support for retrieving credentials from the AWS EC2 service. + BucketProviderAmazon string = apiv1.BucketProviderAmazon + // BucketProviderGoogle for a Google Cloud Storage Bucket. + // Provides support for authentication using a workload identity. + BucketProviderGoogle string = apiv1.BucketProviderGoogle + // BucketProviderAzure for an Azure Blob Storage Bucket. + // Provides support for authentication using a Service Principal, + // Managed Identity or Shared Key. + BucketProviderAzure string = apiv1.BucketProviderAzure + // GenericBucketProvider for any S3 API compatible storage Bucket. - GenericBucketProvider string = "generic" + // + // Deprecated: use BucketProviderGeneric. + GenericBucketProvider string = apiv1.BucketProviderGeneric // AmazonBucketProvider for an AWS S3 object storage Bucket. // Provides support for retrieving credentials from the AWS EC2 service. - AmazonBucketProvider string = "aws" + // + // Deprecated: use BucketProviderAmazon. + AmazonBucketProvider string = apiv1.BucketProviderAmazon // GoogleBucketProvider for a Google Cloud Storage Bucket. // Provides support for authentication using a workload identity. - GoogleBucketProvider string = "gcp" + // + // Deprecated: use BucketProviderGoogle. + GoogleBucketProvider string = apiv1.BucketProviderGoogle // AzureBucketProvider for an Azure Blob Storage Bucket. // Provides support for authentication using a Service Principal, // Managed Identity or Shared Key. - AzureBucketProvider string = "azure" + // + // Deprecated: use BucketProviderAzure. 
+ AzureBucketProvider string = apiv1.BucketProviderAzure ) // BucketSpec specifies the required configuration to produce an Artifact for // an object storage bucket. +// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers" +// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider" +// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider" +// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider" +// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider" type BucketSpec struct { // Provider of the object storage bucket. // Defaults to 'generic', which expects an S3 (API) compatible object @@ -66,6 +92,14 @@ type BucketSpec struct { // +required Endpoint string `json:"endpoint"` + // STS specifies the required configuration to use a Security Token + // Service for fetching temporary credentials to authenticate in a + // Bucket provider. + // + // This field is only supported for the `aws` and `generic` providers. + // +optional + STS *BucketSTSSpec `json:"sts,omitempty"` + // Insecure allows connecting to a non-TLS HTTP Endpoint. 
// +optional Insecure bool `json:"insecure,omitempty"` @@ -83,6 +117,28 @@ type BucketSpec struct { // +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // bucket. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // This field is only supported for the `generic` provider. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the Bucket server. + // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + // Interval at which the Bucket Endpoint is checked for updates. // This interval is approximate and may be subject to jitter to ensure // efficient use of resources. @@ -116,6 +172,45 @@ type BucketSpec struct { AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` } +// BucketSTSSpec specifies the required configuration to use a Security Token +// Service for fetching temporary credentials to authenticate in a Bucket +// provider. +type BucketSTSSpec struct { + // Provider of the Security Token Service. + // +kubebuilder:validation:Enum=aws;ldap + // +required + Provider string `json:"provider"` + + // Endpoint is the HTTP/S endpoint of the Security Token Service from + // where temporary credentials will be fetched. 
+ // +required + // +kubebuilder:validation:Pattern="^(http|https)://.*$" + Endpoint string `json:"endpoint"` + + // SecretRef specifies the Secret containing authentication credentials + // for the STS endpoint. This Secret must contain the fields `username` + // and `password` and is supported only for the `ldap` provider. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // STS endpoint. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // This field is only supported for the `ldap` provider. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` +} + // BucketStatus records the observed state of a Bucket. type BucketStatus struct { // ObservedGeneration is the last observed generation of the Bucket object. @@ -134,7 +229,7 @@ type BucketStatus struct { // Artifact represents the last successful Bucket reconciliation. // +optional - Artifact *apiv1.Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` // ObservedIgnore is the observed exclusion patterns used for constructing // the source artifact. @@ -170,14 +265,14 @@ func (in Bucket) GetRequeueAfter() time.Duration { } // GetArtifact returns the latest artifact from the source if present in the status sub-resource. 
-func (in *Bucket) GetArtifact() *apiv1.Artifact { +func (in *Bucket) GetArtifact() *meta.Artifact { return in.Status.Artifact } // +genclient -// +kubebuilder:storageversion // +kubebuilder:object:root=true // +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 Bucket is deprecated, upgrade to v1" // +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint` // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go index 2e8685cda..89beeb9a7 100644 --- a/api/v1beta2/gitrepository_types.go +++ b/api/v1beta2/gitrepository_types.go @@ -23,8 +23,6 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" - - apiv1 "github.com/fluxcd/source-controller/api/v1" ) const ( @@ -214,12 +212,12 @@ type GitRepositoryStatus struct { // Artifact represents the last successful GitRepository reconciliation. // +optional - Artifact *apiv1.Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` // IncludedArtifacts contains a list of the last successfully included // Artifacts as instructed by GitRepositorySpec.Include. // +optional - IncludedArtifacts []*apiv1.Artifact `json:"includedArtifacts,omitempty"` + IncludedArtifacts []*meta.Artifact `json:"includedArtifacts,omitempty"` // ContentConfigChecksum is a checksum of all the configurations related to // the content of the source artifact: @@ -282,7 +280,7 @@ func (in GitRepository) GetRequeueAfter() time.Duration { // GetArtifact returns the latest Artifact from the GitRepository if present in // the status sub-resource. 
-func (in *GitRepository) GetArtifact() *apiv1.Artifact { +func (in *GitRepository) GetArtifact() *meta.Artifact { return in.Status.Artifact } diff --git a/api/v1beta2/helmchart_types.go b/api/v1beta2/helmchart_types.go index 43f5984cc..ac24b1c13 100644 --- a/api/v1beta2/helmchart_types.go +++ b/api/v1beta2/helmchart_types.go @@ -23,6 +23,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + apiv1 "github.com/fluxcd/source-controller/api/v1" ) @@ -79,6 +80,11 @@ type HelmChartSpec struct { // +deprecated ValuesFile string `json:"valuesFile,omitempty"` + // IgnoreMissingValuesFiles controls whether to silently ignore missing values + // files rather than failing. + // +optional + IgnoreMissingValuesFiles bool `json:"ignoreMissingValuesFiles,omitempty"` + // Suspend tells the controller to suspend the reconciliation of this // source. // +optional @@ -96,7 +102,7 @@ type HelmChartSpec struct { // This field is only supported when using HelmRepository source with spec.type 'oci'. // Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified. // +optional - Verify *OCIRepositoryVerification `json:"verify,omitempty"` + Verify *apiv1.OCIRepositoryVerification `json:"verify,omitempty"` } const ( @@ -142,6 +148,12 @@ type HelmChartStatus struct { // +optional ObservedChartName string `json:"observedChartName,omitempty"` + // ObservedValuesFiles are the observed value files of the last successful + // reconciliation. + // It matches the chart in the last successfully reconciled artifact. + // +optional + ObservedValuesFiles []string `json:"observedValuesFiles,omitempty"` + // Conditions holds the conditions for the HelmChart. // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` @@ -154,7 +166,7 @@ type HelmChartStatus struct { // Artifact represents the output of the last successful reconciliation. 
// +optional - Artifact *apiv1.Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` meta.ReconcileRequestStatus `json:",inline"` } @@ -187,7 +199,7 @@ func (in HelmChart) GetRequeueAfter() time.Duration { // GetArtifact returns the latest artifact from the source if present in the // status sub-resource. -func (in *HelmChart) GetArtifact() *apiv1.Artifact { +func (in *HelmChart) GetArtifact() *meta.Artifact { return in.Status.Artifact } @@ -203,10 +215,10 @@ func (in *HelmChart) GetValuesFiles() []string { } // +genclient -// +kubebuilder:storageversion // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=hc // +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 HelmChart is deprecated, upgrade to v1" // +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart` // +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version` // +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind` diff --git a/api/v1beta2/helmrepository_types.go b/api/v1beta2/helmrepository_types.go index db965a727..56cbd928c 100644 --- a/api/v1beta2/helmrepository_types.go +++ b/api/v1beta2/helmrepository_types.go @@ -23,8 +23,6 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" - - apiv1 "github.com/fluxcd/source-controller/api/v1" ) const ( @@ -152,7 +150,7 @@ type HelmRepositoryStatus struct { // Artifact represents the last successful HelmRepository reconciliation. // +optional - Artifact *apiv1.Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` meta.ReconcileRequestStatus `json:",inline"` } @@ -193,15 +191,15 @@ func (in HelmRepository) GetTimeout() time.Duration { // GetArtifact returns the latest artifact from the source if present in the // status sub-resource. 
-func (in *HelmRepository) GetArtifact() *apiv1.Artifact { +func (in *HelmRepository) GetArtifact() *meta.Artifact { return in.Status.Artifact } // +genclient -// +kubebuilder:storageversion // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=helmrepo // +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 HelmRepository is deprecated, upgrade to v1" // +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" diff --git a/api/v1beta2/ocirepository_types.go b/api/v1beta2/ocirepository_types.go index 581269b1d..760f0d8f1 100644 --- a/api/v1beta2/ocirepository_types.go +++ b/api/v1beta2/ocirepository_types.go @@ -22,6 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/fluxcd/pkg/apis/meta" + apiv1 "github.com/fluxcd/source-controller/api/v1" ) @@ -89,7 +90,7 @@ type OCIRepositorySpec struct { // used to verify the signature and specifies which provider to use to check // whether OCI image is authentic. // +optional - Verify *OCIRepositoryVerification `json:"verify,omitempty"` + Verify *apiv1.OCIRepositoryVerification `json:"verify,omitempty"` // ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate // the image pull if the service account has attached pull secrets. For more information: @@ -115,6 +116,11 @@ type OCIRepositorySpec struct { // +optional CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the container registry. 
+ // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + // Interval at which the OCIRepository URL is checked for updates. // This interval is approximate and may be subject to jitter to ensure // efficient use of resources. @@ -157,6 +163,10 @@ type OCIRepositoryRef struct { // +optional SemVer string `json:"semver,omitempty"` + // SemverFilter is a regex pattern to filter the tags within the SemVer range. + // +optional + SemverFilter string `json:"semverFilter,omitempty"` + // Tag is the image tag to pull, defaults to latest. // +optional Tag string `json:"tag,omitempty"` @@ -179,41 +189,6 @@ type OCILayerSelector struct { Operation string `json:"operation,omitempty"` } -// OCIRepositoryVerification verifies the authenticity of an OCI Artifact -type OCIRepositoryVerification struct { - // Provider specifies the technology used to sign the OCI Artifact. - // +kubebuilder:validation:Enum=cosign - // +kubebuilder:default:=cosign - Provider string `json:"provider"` - - // SecretRef specifies the Kubernetes Secret containing the - // trusted public keys. - // +optional - SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` - - // MatchOIDCIdentity specifies the identity matching criteria to use - // while verifying an OCI artifact which was signed using Cosign keyless - // signing. The artifact's identity is deemed to be verified if any of the - // specified matchers match against the identity. - // +optional - MatchOIDCIdentity []OIDCIdentityMatch `json:"matchOIDCIdentity,omitempty"` -} - -// OIDCIdentityMatch specifies options for verifying the certificate identity, -// i.e. the issuer and the subject of the certificate. -type OIDCIdentityMatch struct { - // Issuer specifies the regex pattern to match against to verify - // the OIDC issuer in the Fulcio certificate. The pattern must be a - // valid Go regular expression. 
- // +required - Issuer string `json:"issuer"` - // Subject specifies the regex pattern to match against to verify - // the identity subject in the Fulcio certificate. The pattern must - // be a valid Go regular expression. - // +required - Subject string `json:"subject"` -} - // OCIRepositoryStatus defines the observed state of OCIRepository type OCIRepositoryStatus struct { // ObservedGeneration is the last observed generation. @@ -230,7 +205,7 @@ type OCIRepositoryStatus struct { // Artifact represents the output of the last successful OCI Repository sync. // +optional - Artifact *apiv1.Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` // ContentConfigChecksum is a checksum of all the configurations related to // the content of the source artifact: @@ -285,7 +260,7 @@ func (in OCIRepository) GetRequeueAfter() time.Duration { // GetArtifact returns the latest Artifact from the OCIRepository if present in // the status sub-resource. -func (in *OCIRepository) GetArtifact() *apiv1.Artifact { +func (in *OCIRepository) GetArtifact() *meta.Artifact { return in.Status.Artifact } @@ -308,10 +283,10 @@ func (in *OCIRepository) GetLayerOperation() string { } // +genclient -// +kubebuilder:storageversion // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=ocirepo // +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 OCIRepository is deprecated, upgrade to v1" // +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" diff --git a/api/v1beta2/sts_types.go b/api/v1beta2/sts_types.go new file mode 100644 index 000000000..c07c05123 --- /dev/null +++ b/api/v1beta2/sts_types.go @@ -0,0 +1,26 @@ +/* +Copyright 2024 The Flux 
authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +const ( + // STSProviderAmazon represents the AWS provider for Security Token Service. + // Provides support for fetching temporary credentials from an AWS STS endpoint. + STSProviderAmazon string = "aws" + // STSProviderLDAP represents the LDAP provider for Security Token Service. + // Provides support for fetching temporary credentials from an LDAP endpoint. + STSProviderLDAP string = "ldap" +) diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index e522081f2..0b874dd7e 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -1,8 +1,7 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* -Copyright 2023 The Flux authors +Copyright 2025 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -116,14 +115,54 @@ func (in *BucketList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec. +func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec { + if in == nil { + return nil + } + out := new(BucketSTSSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { *out = *in + if in.STS != nil { + in, out := &in.STS, &out.STS + *out = new(BucketSTSSpec) + (*in).DeepCopyInto(*out) + } if in.SecretRef != nil { in, out := &in.SecretRef, &out.SecretRef *out = new(meta.LocalObjectReference) **out = **in } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } out.Interval = in.Interval if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout @@ -164,7 +203,7 @@ func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(apiv1.Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } if in.ObservedIgnore != nil { @@ -338,16 +377,16 @@ func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(apiv1.Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } if in.IncludedArtifacts != nil { in, out := &in.IncludedArtifacts, &out.IncludedArtifacts - *out = 
make([]*apiv1.Artifact, len(*in)) + *out = make([]*meta.Artifact, len(*in)) for i := range *in { if (*in)[i] != nil { in, out := &(*in)[i], &(*out)[i] - *out = new(apiv1.Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } } @@ -467,7 +506,7 @@ func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) { } if in.Verify != nil { in, out := &in.Verify, &out.Verify - *out = new(OCIRepositoryVerification) + *out = new(apiv1.OCIRepositoryVerification) (*in).DeepCopyInto(*out) } } @@ -485,6 +524,11 @@ func (in *HelmChartSpec) DeepCopy() *HelmChartSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) { *out = *in + if in.ObservedValuesFiles != nil { + in, out := &in.ObservedValuesFiles, &out.ObservedValuesFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]v1.Condition, len(*in)) @@ -494,7 +538,7 @@ func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(apiv1.Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } out.ReconcileRequestStatus = in.ReconcileRequestStatus @@ -617,7 +661,7 @@ func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(apiv1.Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } out.ReconcileRequestStatus = in.ReconcileRequestStatus @@ -757,7 +801,7 @@ func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) { } if in.Verify != nil { in, out := &in.Verify, &out.Verify - *out = new(OCIRepositoryVerification) + *out = new(apiv1.OCIRepositoryVerification) (*in).DeepCopyInto(*out) } if in.CertSecretRef != nil { @@ -765,6 +809,11 @@ func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) { *out = 
new(meta.LocalObjectReference) **out = **in } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } out.Interval = in.Interval if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout @@ -800,7 +849,7 @@ func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(apiv1.Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } if in.ObservedIgnore != nil { @@ -825,43 +874,3 @@ func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OCIRepositoryVerification) DeepCopyInto(out *OCIRepositoryVerification) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(meta.LocalObjectReference) - **out = **in - } - if in.MatchOIDCIdentity != nil { - in, out := &in.MatchOIDCIdentity, &out.MatchOIDCIdentity - *out = make([]OIDCIdentityMatch, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryVerification. -func (in *OCIRepositoryVerification) DeepCopy() *OCIRepositoryVerification { - if in == nil { - return nil - } - out := new(OCIRepositoryVerification) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OIDCIdentityMatch) DeepCopyInto(out *OIDCIdentityMatch) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCIdentityMatch. 
-func (in *OIDCIdentityMatch) DeepCopy() *OIDCIdentityMatch { - if in == nil { - return nil - } - out := new(OIDCIdentityMatch) - in.DeepCopyInto(out) - return out -} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index 2ef2fb603..f578c8da0 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: buckets.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -18,94 +18,122 @@ spec: - jsonPath: .spec.endpoint name: Endpoint type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date - jsonPath: .status.conditions[?(@.type=="Ready")].status name: Ready type: string - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 + name: v1 schema: openAPIV3Schema: - description: Bucket is the Schema for the buckets API + description: Bucket is the Schema for the buckets API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: BucketSpec defines the desired state of an S3 compatible - bucket + description: |- + BucketSpec specifies the required configuration to produce an Artifact for + an object storage bucket. properties: - accessFrom: - description: AccessFrom defines an Access Control List for allowing - cross-namespace references to this object. + bucketName: + description: BucketName is the name of the object storage bucket. + type: string + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + bucket. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + This field is only supported for the `generic` provider. properties: - namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. 
Items in this list are evaluated - using a logical OR operation. - items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. - properties: - matchLabels: - additionalProperties: - type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - type: array + name: + description: Name of the referent. + type: string required: - - namespaceSelectors + - name type: object - bucketName: - description: The bucket name. - type: string endpoint: - description: The bucket endpoint address. + description: Endpoint is the object storage address the BucketName + is located at. type: string ignore: - description: Ignore overrides the set of excluded patterns in the - .sourceignore format (which is the same as .gitignore). If not provided, - a default will be used, consult the documentation for your version - to find out what those are. + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. type: string insecure: - description: Insecure allows connecting to a non-TLS S3 HTTP endpoint. + description: Insecure allows connecting to a non-TLS HTTP Endpoint. type: boolean interval: - description: The interval at which to check for bucket updates. + description: |- + Interval at which the Bucket Endpoint is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. 
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + prefix: + description: Prefix to use for server-side filtering of files in the + Bucket. type: string provider: default: generic - description: The S3 compatible storage provider name, default ('generic'). + description: |- + Provider of the object storage bucket. + Defaults to 'generic', which expects an S3 (API) compatible object + storage. enum: - generic - aws - gcp + - azure type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the Bucket server. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object region: - description: The bucket region. + description: Region of the Endpoint where the BucketName is located + in. type: string secretRef: - description: The name of the secret containing authentication credentials + description: |- + SecretRef specifies the Secret containing authentication credentials for the Bucket. properties: name: @@ -114,91 +142,191 @@ spec: required: - name type: object + serviceAccountName: + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + the bucket. This field is only supported for the 'gcp' and 'aws' providers. + For more information about workload identity: + https://fluxcd.io/flux/components/source/buckets/#workload-identity + type: string + sts: + description: |- + STS specifies the required configuration to use a Security Token + Service for fetching temporary credentials to authenticate in a + Bucket provider. + + This field is only supported for the `aws` and `generic` providers. 
+ properties: + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + STS endpoint. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + This field is only supported for the `ldap` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + endpoint: + description: |- + Endpoint is the HTTP/S endpoint of the Security Token Service from + where temporary credentials will be fetched. + pattern: ^(http|https)://.*$ + type: string + provider: + description: Provider of the Security Token Service. + enum: + - aws + - ldap + type: string + secretRef: + description: |- + SecretRef specifies the Secret containing authentication credentials + for the STS endpoint. This Secret must contain the fields `username` + and `password` and is supported only for the `ldap` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - endpoint + - provider + type: object suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. + description: |- + Suspend tells the controller to suspend the reconciliation of this + Bucket. type: boolean timeout: default: 60s - description: The timeout for download operations, defaults to 60s. + description: Timeout for fetch operations, defaults to 60s. 
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ type: string required: - bucketName - endpoint - interval type: object + x-kubernetes-validations: + - message: STS configuration is only supported for the 'aws' and 'generic' + Bucket providers + rule: self.provider == 'aws' || self.provider == 'generic' || !has(self.sts) + - message: '''aws'' is the only supported STS provider for the ''aws'' + Bucket provider' + rule: self.provider != 'aws' || !has(self.sts) || self.sts.provider + == 'aws' + - message: '''ldap'' is the only supported STS provider for the ''generic'' + Bucket provider' + rule: self.provider != 'generic' || !has(self.sts) || self.sts.provider + == 'ldap' + - message: spec.sts.secretRef is not required for the 'aws' STS provider + rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.secretRef)' + - message: spec.sts.certSecretRef is not required for the 'aws' STS provider + rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.certSecretRef)' + - message: ServiceAccountName is not supported for the 'generic' Bucket + provider + rule: self.provider != 'generic' || !has(self.serviceAccountName) + - message: cannot set both .spec.secretRef and .spec.serviceAccountName + rule: '!has(self.secretRef) || !has(self.serviceAccountName)' status: default: observedGeneration: -1 - description: BucketStatus defines the observed state of a bucket + description: BucketStatus records the observed state of a Bucket. properties: artifact: - description: Artifact represents the output of the last successful - Bucket sync. + description: Artifact represents the last successful Bucket reconciliation. properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. + digest: + description: Digest is the digest of the file in the form of ':'. 
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of this artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object path: - description: Path is the relative file path of this artifact. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm index timestamp, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer url: - description: URL is the HTTP address of this artifact. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the Bucket. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. 
// Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -213,10 +341,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -229,22 +353,31 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: - description: ObservedGeneration is the last observed generation. + description: ObservedGeneration is the last observed generation of + the Bucket object. 
format: int64 type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. + type: string url: - description: URL is the download link for the artifact output of the - last Bucket sync. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + BucketStatus.Artifact data is recommended. type: string type: object type: object served: true - storage: false + storage: true subresources: status: {} - additionalPrinterColumns: @@ -260,49 +393,57 @@ spec: - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string + deprecated: true + deprecationWarning: v1beta2 Bucket is deprecated, upgrade to v1 name: v1beta2 schema: openAPIV3Schema: description: Bucket is the Schema for the buckets API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: BucketSpec specifies the required configuration to produce - an Artifact for an object storage bucket. + description: |- + BucketSpec specifies the required configuration to produce an Artifact for + an object storage bucket. properties: accessFrom: - description: 'AccessFrom specifies an Access Control List for allowing - cross-namespace references to this object. NOTE: Not implemented, - provisional as of https://github.com/fluxcd/flux2/pull/2092' + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. + An empty map of MatchLabels matches all namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. 
+ description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object type: array @@ -312,23 +453,47 @@ spec: bucketName: description: BucketName is the name of the object storage bucket. type: string + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + bucket. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + This field is only supported for the `generic` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object endpoint: description: Endpoint is the object storage address the BucketName is located at. type: string ignore: - description: Ignore overrides the set of excluded patterns in the - .sourceignore format (which is the same as .gitignore). If not provided, - a default will be used, consult the documentation for your version - to find out what those are. + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. type: string insecure: description: Insecure allows connecting to a non-TLS HTTP Endpoint. type: boolean interval: - description: Interval at which the Bucket Endpoint is checked for - updates. 
This interval is approximate and may be subject to jitter - to ensure efficient use of resources. + description: |- + Interval at which the Bucket Endpoint is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string prefix: @@ -337,21 +502,35 @@ spec: type: string provider: default: generic - description: Provider of the object storage bucket. Defaults to 'generic', - which expects an S3 (API) compatible object storage. + description: |- + Provider of the object storage bucket. + Defaults to 'generic', which expects an S3 (API) compatible object + storage. enum: - generic - aws - gcp - azure type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the Bucket server. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object region: description: Region of the Endpoint where the BucketName is located in. type: string secretRef: - description: SecretRef specifies the Secret containing authentication - credentials for the Bucket. + description: |- + SecretRef specifies the Secret containing authentication credentials + for the Bucket. properties: name: description: Name of the referent. @@ -359,9 +538,69 @@ spec: required: - name type: object + sts: + description: |- + STS specifies the required configuration to use a Security Token + Service for fetching temporary credentials to authenticate in a + Bucket provider. + + This field is only supported for the `aws` and `generic` providers. 
+ properties: + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + STS endpoint. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + This field is only supported for the `ldap` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + endpoint: + description: |- + Endpoint is the HTTP/S endpoint of the Security Token Service from + where temporary credentials will be fetched. + pattern: ^(http|https)://.*$ + type: string + provider: + description: Provider of the Security Token Service. + enum: + - aws + - ldap + type: string + secretRef: + description: |- + SecretRef specifies the Secret containing authentication credentials + for the STS endpoint. This Secret must contain the fields `username` + and `password` and is supported only for the `ldap` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - endpoint + - provider + type: object suspend: - description: Suspend tells the controller to suspend the reconciliation - of this Bucket. + description: |- + Suspend tells the controller to suspend the reconciliation of this + Bucket. 
type: boolean timeout: default: 60s @@ -373,6 +612,22 @@ spec: - endpoint - interval type: object + x-kubernetes-validations: + - message: STS configuration is only supported for the 'aws' and 'generic' + Bucket providers + rule: self.provider == 'aws' || self.provider == 'generic' || !has(self.sts) + - message: '''aws'' is the only supported STS provider for the ''aws'' + Bucket provider' + rule: self.provider != 'aws' || !has(self.sts) || self.sts.provider + == 'aws' + - message: '''ldap'' is the only supported STS provider for the ''generic'' + Bucket provider' + rule: self.provider != 'generic' || !has(self.sts) || self.sts.provider + == 'ldap' + - message: spec.sts.secretRef is not required for the 'aws' STS provider + rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.secretRef)' + - message: spec.sts.certSecretRef is not required for the 'aws' STS provider + rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.certSecretRef)' status: default: observedGeneration: -1 @@ -386,8 +641,9 @@ spec: pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of the Artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string metadata: @@ -396,26 +652,28 @@ spec: description: Metadata holds upstream information such as OCI annotations. type: object path: - description: Path is the relative file path of the Artifact. It - can be used to locate the file in the root of the Artifact storage - on the local file system of the controller managing the Source. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. 
type: string revision: - description: Revision is a human-readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string size: description: Size is the number of bytes in the file. format: int64 type: integer url: - description: URL is the HTTP address of the Artifact as exposed - by the controller managing the Source. It can be used to retrieve - the Artifact for consumption, e.g. by another controller applying - the Artifact contents. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision @@ -424,43 +682,35 @@ spec: conditions: description: Conditions holds the conditions for the Bucket. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. 
This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. 
+ The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -475,10 +725,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -491,9 +737,10 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation of @@ -501,17 +748,19 @@ spec: format: int64 type: integer observedIgnore: - description: ObservedIgnore is the observed exclusion patterns used - for constructing the source artifact. + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. type: string url: - description: URL is the dynamic fetch link for the latest Artifact. - It is provided on a "best effort" basis, and using the precise BucketStatus.Artifact - data is recommended. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + BucketStatus.Artifact data is recommended. 
type: string type: object type: object served: true - storage: true + storage: false subresources: status: {} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml new file mode 100644 index 000000000..23cdf63c3 --- /dev/null +++ b/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml @@ -0,0 +1,191 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: externalartifacts.source.toolkit.fluxcd.io +spec: + group: source.toolkit.fluxcd.io + names: + kind: ExternalArtifact + listKind: ExternalArtifactList + plural: externalartifacts + singular: externalartifact + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .spec.sourceRef.name + name: Source + type: string + name: v1 + schema: + openAPIV3Schema: + description: ExternalArtifact is the Schema for the external artifacts API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExternalArtifactSpec defines the desired state of ExternalArtifact + properties: + sourceRef: + description: |- + SourceRef points to the Kubernetes custom resource for + which the artifact is generated. + properties: + apiVersion: + description: API version of the referent, if not specified the + Kubernetes preferred version will be used. + type: string + kind: + description: Kind of the referent. + type: string + name: + description: Name of the referent. + type: string + namespace: + description: Namespace of the referent, when not specified it + acts as LocalObjectReference. + type: string + required: + - kind + - name + type: object + type: object + status: + description: ExternalArtifactStatus defines the observed state of ExternalArtifact + properties: + artifact: + description: Artifact represents the output of an ExternalArtifact + reconciliation. + properties: + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. + format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. 
+ format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. + type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + conditions: + description: Conditions holds the conditions for the ExternalArtifact. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index c06124009..10663e473 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: gitrepositories.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -35,42 +35,51 @@ spec: description: GitRepository is the Schema for the gitrepositories API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. 
Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: GitRepositorySpec specifies the required configuration to - produce an Artifact for a Git repository. + description: |- + GitRepositorySpec specifies the required configuration to produce an + Artifact for a Git repository. properties: ignore: - description: Ignore overrides the set of excluded patterns in the - .sourceignore format (which is the same as .gitignore). If not provided, - a default will be used, consult the documentation for your version - to find out what those are. + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. type: string include: - description: Include specifies a list of GitRepository resources which - Artifacts should be included in the Artifact produced for this GitRepository. + description: |- + Include specifies a list of GitRepository resources which Artifacts + should be included in the Artifact produced for this GitRepository. items: - description: GitRepositoryInclude specifies a local reference to - a GitRepository which Artifact (sub-)contents must be included, - and where they should be placed. 
+ description: |- + GitRepositoryInclude specifies a local reference to a GitRepository which + Artifact (sub-)contents must be included, and where they should be placed. properties: fromPath: - description: FromPath specifies the path to copy contents from, - defaults to the root of the Artifact. + description: |- + FromPath specifies the path to copy contents from, defaults to the root + of the Artifact. type: string repository: - description: GitRepositoryRef specifies the GitRepository which - Artifact contents must be included. + description: |- + GitRepositoryRef specifies the GitRepository which Artifact contents + must be included. properties: name: description: Name of the referent. @@ -79,22 +88,34 @@ spec: - name type: object toPath: - description: ToPath specifies the path to copy contents to, - defaults to the name of the GitRepositoryRef. + description: |- + ToPath specifies the path to copy contents to, defaults to the name of + the GitRepositoryRef. type: string required: - repository type: object type: array interval: - description: Interval at which the GitRepository URL is checked for - updates. This interval is approximate and may be subject to jitter - to ensure efficient use of resources. + description: |- + Interval at which the GitRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string + provider: + description: |- + Provider used for authentication, can be 'azure', 'github', 'generic'. + When not specified, defaults to 'generic'. + enum: + - generic + - azure + - github + type: string proxySecretRef: - description: ProxySecretRef specifies the Secret containing the proxy - configuration to use while communicating with the Git server. + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the Git server. 
properties: name: description: Name of the referent. @@ -103,29 +124,32 @@ spec: - name type: object recurseSubmodules: - description: RecurseSubmodules enables the initialization of all submodules - within the GitRepository as cloned from the URL, using their default - settings. + description: |- + RecurseSubmodules enables the initialization of all submodules within + the GitRepository as cloned from the URL, using their default settings. type: boolean ref: - description: Reference specifies the Git reference to resolve and - monitor for changes, defaults to the 'master' branch. + description: |- + Reference specifies the Git reference to resolve and monitor for + changes, defaults to the 'master' branch. properties: branch: description: Branch to check out, defaults to 'master' if no other field is defined. type: string commit: - description: "Commit SHA to check out, takes precedence over all - reference fields. \n This can be combined with Branch to shallow - clone the branch, in which the commit is expected to exist." + description: |- + Commit SHA to check out, takes precedence over all reference fields. + + This can be combined with Branch to shallow clone the branch, in which + the commit is expected to exist. type: string name: - description: "Name of the reference to check out; takes precedence - over Branch, Tag and SemVer. \n It must be a valid Git reference: - https://git-scm.com/docs/git-check-ref-format#_description Examples: - \"refs/heads/main\", \"refs/tags/v0.1.0\", \"refs/pull/420/head\", - \"refs/merge-requests/1/head\"" + description: |- + Name of the reference to check out; takes precedence over Branch, Tag and SemVer. 
+ + It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description + Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head" type: string semver: description: SemVer tag expression to check out, takes precedence @@ -136,11 +160,13 @@ spec: type: string type: object secretRef: - description: SecretRef specifies the Secret containing authentication - credentials for the GitRepository. For HTTPS repositories the Secret - must contain 'username' and 'password' fields for basic auth or - 'bearerToken' field for token auth. For SSH repositories the Secret - must contain 'identity' and 'known_hosts' fields. + description: |- + SecretRef specifies the Secret containing authentication credentials for + the GitRepository. + For HTTPS repositories the Secret must contain 'username' and 'password' + fields for basic auth or 'bearerToken' field for token auth. + For SSH repositories the Secret must contain 'identity' + and 'known_hosts' fields. properties: name: description: Name of the referent. @@ -148,9 +174,23 @@ spec: required: - name type: object + serviceAccountName: + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to + authenticate to the GitRepository. This field is only supported for 'azure' provider. + type: string + sparseCheckout: + description: |- + SparseCheckout specifies a list of directories to checkout when cloning + the repository. If specified, only these directories are included in the + Artifact produced for this GitRepository. + items: + type: string + type: array suspend: - description: Suspend tells the controller to suspend the reconciliation - of this GitRepository. + description: |- + Suspend tells the controller to suspend the reconciliation of this + GitRepository. 
type: boolean timeout: default: 60s @@ -164,15 +204,18 @@ spec: pattern: ^(http|https|ssh)://.*$ type: string verify: - description: Verification specifies the configuration to verify the - Git commit signature(s). + description: |- + Verification specifies the configuration to verify the Git commit + signature(s). properties: mode: default: HEAD - description: "Mode specifies which Git object(s) should be verified. - \n The variants \"head\" and \"HEAD\" both imply the same thing, - i.e. verify the commit that the HEAD of the Git repository points - to. The variant \"head\" solely exists to ensure backwards compatibility." + description: |- + Mode specifies which Git object(s) should be verified. + + The variants "head" and "HEAD" both imply the same thing, i.e. verify + the commit that the HEAD of the Git repository points to. The variant + "head" solely exists to ensure backwards compatibility. enum: - head - HEAD @@ -180,8 +223,9 @@ spec: - TagAndHEAD type: string secretRef: - description: SecretRef specifies the Secret containing the public - keys of trusted Git authors. + description: |- + SecretRef specifies the Secret containing the public keys of trusted Git + authors. properties: name: description: Name of the referent. @@ -196,6 +240,10 @@ spec: - interval - url type: object + x-kubernetes-validations: + - message: serviceAccountName can only be set when provider is 'azure' + rule: '!has(self.serviceAccountName) || (has(self.provider) && self.provider + == ''azure'')' status: default: observedGeneration: -1 @@ -210,8 +258,9 @@ spec: pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of the Artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. 
format: date-time type: string metadata: @@ -220,26 +269,28 @@ spec: description: Metadata holds upstream information such as OCI annotations. type: object path: - description: Path is the relative file path of the Artifact. It - can be used to locate the file in the root of the Artifact storage - on the local file system of the controller managing the Source. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human-readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string size: description: Size is the number of bytes in the file. format: int64 type: integer url: - description: URL is the HTTP address of the Artifact as exposed - by the controller managing the Source. It can be used to retrieve - the Artifact for consumption, e.g. by another controller applying - the Artifact contents. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision @@ -248,43 +299,35 @@ spec: conditions: description: Conditions holds the conditions for the GitRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. 
// Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -299,10 +342,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -315,8 +354,9 @@ spec: type: object type: array includedArtifacts: - description: IncludedArtifacts contains a list of the last successfully - included Artifacts as instructed by GitRepositorySpec.Include. + description: |- + IncludedArtifacts contains a list of the last successfully included + Artifacts as instructed by GitRepositorySpec.Include. items: description: Artifact represents the output of a Source reconciliation. properties: @@ -326,8 +366,9 @@ spec: pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of the Artifact. 
+ description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string metadata: @@ -337,27 +378,28 @@ spec: annotations. type: object path: - description: Path is the relative file path of the Artifact. - It can be used to locate the file in the root of the Artifact - storage on the local file system of the controller managing - the Source. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human-readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string size: description: Size is the number of bytes in the file. format: int64 type: integer url: - description: URL is the HTTP address of the Artifact as exposed - by the controller managing the Source. It can be used to retrieve - the Artifact for consumption, e.g. by another controller applying - the Artifact contents. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision @@ -365,34 +407,40 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. 
type: string observedGeneration: - description: ObservedGeneration is the last observed generation of - the GitRepository object. + description: |- + ObservedGeneration is the last observed generation of the GitRepository + object. format: int64 type: integer observedIgnore: - description: ObservedIgnore is the observed exclusion patterns used - for constructing the source artifact. + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. type: string observedInclude: - description: ObservedInclude is the observed list of GitRepository - resources used to produce the current Artifact. + description: |- + ObservedInclude is the observed list of GitRepository resources used to + produce the current Artifact. items: - description: GitRepositoryInclude specifies a local reference to - a GitRepository which Artifact (sub-)contents must be included, - and where they should be placed. + description: |- + GitRepositoryInclude specifies a local reference to a GitRepository which + Artifact (sub-)contents must be included, and where they should be placed. properties: fromPath: - description: FromPath specifies the path to copy contents from, - defaults to the root of the Artifact. + description: |- + FromPath specifies the path to copy contents from, defaults to the root + of the Artifact. type: string repository: - description: GitRepositoryRef specifies the GitRepository which - Artifact contents must be included. + description: |- + GitRepositoryRef specifies the GitRepository which Artifact contents + must be included. properties: name: description: Name of the referent. @@ -401,357 +449,35 @@ spec: - name type: object toPath: - description: ToPath specifies the path to copy contents to, - defaults to the name of the GitRepositoryRef. + description: |- + ToPath specifies the path to copy contents to, defaults to the name of + the GitRepositoryRef. 
type: string required: - repository type: object type: array observedRecurseSubmodules: - description: ObservedRecurseSubmodules is the observed resource submodules + description: |- + ObservedRecurseSubmodules is the observed resource submodules configuration used to produce the current Artifact. type: boolean - sourceVerificationMode: - description: SourceVerificationMode is the last used verification - mode indicating which Git object(s) have been verified. - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .spec.url - name: URL - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].message - name: Status - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - deprecated: true - deprecationWarning: v1beta1 GitRepository is deprecated, upgrade to v1 - name: v1beta1 - schema: - openAPIV3Schema: - description: GitRepository is the Schema for the gitrepositories API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: GitRepositorySpec defines the desired state of a Git repository. 
- properties: - accessFrom: - description: AccessFrom defines an Access Control List for allowing - cross-namespace references to this object. - properties: - namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. - items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. - properties: - matchLabels: - additionalProperties: - type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - type: array - required: - - namespaceSelectors - type: object - gitImplementation: - default: go-git - description: Determines which git client library to use. Defaults - to go-git, valid values are ('go-git', 'libgit2'). - enum: - - go-git - - libgit2 - type: string - ignore: - description: Ignore overrides the set of excluded patterns in the - .sourceignore format (which is the same as .gitignore). If not provided, - a default will be used, consult the documentation for your version - to find out what those are. - type: string - include: - description: Extra git repositories to map into the repository + observedSparseCheckout: + description: |- + ObservedSparseCheckout is the observed list of directories used to + produce the current Artifact. items: - description: GitRepositoryInclude defines a source with a from and - to path. - properties: - fromPath: - description: The path to copy contents from, defaults to the - root directory. - type: string - repository: - description: Reference to a GitRepository to include. - properties: - name: - description: Name of the referent. 
- type: string - required: - - name - type: object - toPath: - description: The path to copy contents to, defaults to the name - of the source ref. - type: string - required: - - repository - type: object + type: string type: array - interval: - description: The interval at which to check for repository updates. - type: string - recurseSubmodules: - description: When enabled, after the clone is created, initializes - all submodules within, using their default settings. This option - is available only when using the 'go-git' GitImplementation. - type: boolean - ref: - description: The Git reference to checkout and monitor for changes, - defaults to master branch. - properties: - branch: - description: The Git branch to checkout, defaults to master. - type: string - commit: - description: The Git commit SHA to checkout, if specified Tag - filters will be ignored. - type: string - semver: - description: The Git tag semver expression, takes precedence over - Tag. - type: string - tag: - description: The Git tag to checkout, takes precedence over Branch. - type: string - type: object - secretRef: - description: The secret name containing the Git credentials. For HTTPS - repositories the secret must contain username and password fields. - For SSH repositories the secret must contain identity and known_hosts - fields. - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. - type: boolean - timeout: - default: 60s - description: The timeout for remote Git operations like cloning, defaults - to 60s. - type: string - url: - description: The repository URL, can be a HTTP/S or SSH address. - pattern: ^(http|https|ssh)://.*$ - type: string - verify: - description: Verify OpenPGP signature for the Git commit HEAD points - to. 
- properties: - mode: - description: Mode describes what git object should be verified, - currently ('head'). - enum: - - head - type: string - secretRef: - description: The secret name containing the public keys of all - trusted Git authors. - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - required: - - mode - type: object - required: - - interval - - url - type: object - status: - default: - observedGeneration: -1 - description: GitRepositoryStatus defines the observed state of a Git repository. - properties: - artifact: - description: Artifact represents the output of the last successful - repository sync. - properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. - type: string - lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of this artifact. - format: date-time - type: string - path: - description: Path is the relative file path of this artifact. - type: string - revision: - description: Revision is a human readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm index timestamp, a Helm chart version, etc. - type: string - url: - description: URL is the HTTP address of this artifact. - type: string - required: - - path - - url - type: object - conditions: - description: Conditions holds the conditions for the GitRepository. - items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. 
// Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" - properties: - lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - includedArtifacts: - description: IncludedArtifacts represents the included artifacts from - the last successful repository sync. - items: - description: Artifact represents the output of a source synchronisation. - properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. - type: string - lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of this artifact. - format: date-time - type: string - path: - description: Path is the relative file path of this artifact. - type: string - revision: - description: Revision is a human readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm index timestamp, a Helm chart version, etc. - type: string - url: - description: URL is the HTTP address of this artifact. - type: string - required: - - path - - url - type: object - type: array - lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. - type: string - observedGeneration: - description: ObservedGeneration is the last observed generation. - format: int64 - type: integer - url: - description: URL is the download link for the artifact output of the - last repository sync. 
+ sourceVerificationMode: + description: |- + SourceVerificationMode is the last used verification mode indicating + which Git object(s) have been verified. type: string type: object type: object served: true - storage: false + storage: true subresources: status: {} - additionalPrinterColumns: @@ -775,43 +501,49 @@ spec: description: GitRepository is the Schema for the gitrepositories API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: GitRepositorySpec specifies the required configuration to - produce an Artifact for a Git repository. + description: |- + GitRepositorySpec specifies the required configuration to produce an + Artifact for a Git repository. 
properties: accessFrom: - description: 'AccessFrom specifies an Access Control List for allowing - cross-namespace references to this object. NOTE: Not implemented, - provisional as of https://github.com/fluxcd/flux2/pull/2092' + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. + An empty map of MatchLabels matches all namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object type: array @@ -820,35 +552,39 @@ spec: type: object gitImplementation: default: go-git - description: 'GitImplementation specifies which Git client library - implementation to use. 
Defaults to ''go-git'', valid values are - (''go-git'', ''libgit2''). Deprecated: gitImplementation is deprecated - now that ''go-git'' is the only supported implementation.' + description: |- + GitImplementation specifies which Git client library implementation to + use. Defaults to 'go-git', valid values are ('go-git', 'libgit2'). + Deprecated: gitImplementation is deprecated now that 'go-git' is the + only supported implementation. enum: - go-git - libgit2 type: string ignore: - description: Ignore overrides the set of excluded patterns in the - .sourceignore format (which is the same as .gitignore). If not provided, - a default will be used, consult the documentation for your version - to find out what those are. + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. type: string include: - description: Include specifies a list of GitRepository resources which - Artifacts should be included in the Artifact produced for this GitRepository. + description: |- + Include specifies a list of GitRepository resources which Artifacts + should be included in the Artifact produced for this GitRepository. items: - description: GitRepositoryInclude specifies a local reference to - a GitRepository which Artifact (sub-)contents must be included, - and where they should be placed. + description: |- + GitRepositoryInclude specifies a local reference to a GitRepository which + Artifact (sub-)contents must be included, and where they should be placed. properties: fromPath: - description: FromPath specifies the path to copy contents from, - defaults to the root of the Artifact. + description: |- + FromPath specifies the path to copy contents from, defaults to the root + of the Artifact. 
type: string repository: - description: GitRepositoryRef specifies the GitRepository which - Artifact contents must be included. + description: |- + GitRepositoryRef specifies the GitRepository which Artifact contents + must be included. properties: name: description: Name of the referent. @@ -857,8 +593,9 @@ spec: - name type: object toPath: - description: ToPath specifies the path to copy contents to, - defaults to the name of the GitRepositoryRef. + description: |- + ToPath specifies the path to copy contents to, defaults to the name of + the GitRepositoryRef. type: string required: - repository @@ -869,29 +606,32 @@ spec: pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string recurseSubmodules: - description: RecurseSubmodules enables the initialization of all submodules - within the GitRepository as cloned from the URL, using their default - settings. + description: |- + RecurseSubmodules enables the initialization of all submodules within + the GitRepository as cloned from the URL, using their default settings. type: boolean ref: - description: Reference specifies the Git reference to resolve and - monitor for changes, defaults to the 'master' branch. + description: |- + Reference specifies the Git reference to resolve and monitor for + changes, defaults to the 'master' branch. properties: branch: description: Branch to check out, defaults to 'master' if no other field is defined. type: string commit: - description: "Commit SHA to check out, takes precedence over all - reference fields. \n This can be combined with Branch to shallow - clone the branch, in which the commit is expected to exist." + description: |- + Commit SHA to check out, takes precedence over all reference fields. + + This can be combined with Branch to shallow clone the branch, in which + the commit is expected to exist. type: string name: - description: "Name of the reference to check out; takes precedence - over Branch, Tag and SemVer. 
\n It must be a valid Git reference: - https://git-scm.com/docs/git-check-ref-format#_description Examples: - \"refs/heads/main\", \"refs/tags/v0.1.0\", \"refs/pull/420/head\", - \"refs/merge-requests/1/head\"" + description: |- + Name of the reference to check out; takes precedence over Branch, Tag and SemVer. + + It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description + Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head" type: string semver: description: SemVer tag expression to check out, takes precedence @@ -902,11 +642,13 @@ spec: type: string type: object secretRef: - description: SecretRef specifies the Secret containing authentication - credentials for the GitRepository. For HTTPS repositories the Secret - must contain 'username' and 'password' fields for basic auth or - 'bearerToken' field for token auth. For SSH repositories the Secret - must contain 'identity' and 'known_hosts' fields. + description: |- + SecretRef specifies the Secret containing authentication credentials for + the GitRepository. + For HTTPS repositories the Secret must contain 'username' and 'password' + fields for basic auth or 'bearerToken' field for token auth. + For SSH repositories the Secret must contain 'identity' + and 'known_hosts' fields. properties: name: description: Name of the referent. @@ -915,8 +657,9 @@ spec: - name type: object suspend: - description: Suspend tells the controller to suspend the reconciliation - of this GitRepository. + description: |- + Suspend tells the controller to suspend the reconciliation of this + GitRepository. type: boolean timeout: default: 60s @@ -930,8 +673,9 @@ spec: pattern: ^(http|https|ssh)://.*$ type: string verify: - description: Verification specifies the configuration to verify the - Git commit signature(s). + description: |- + Verification specifies the configuration to verify the Git commit + signature(s). 
properties: mode: description: Mode specifies what Git object should be verified, @@ -940,8 +684,9 @@ spec: - head type: string secretRef: - description: SecretRef specifies the Secret containing the public - keys of trusted Git authors. + description: |- + SecretRef specifies the Secret containing the public keys of trusted Git + authors. properties: name: description: Name of the referent. @@ -971,8 +716,9 @@ spec: pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of the Artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string metadata: @@ -981,26 +727,28 @@ spec: description: Metadata holds upstream information such as OCI annotations. type: object path: - description: Path is the relative file path of the Artifact. It - can be used to locate the file in the root of the Artifact storage - on the local file system of the controller managing the Source. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human-readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string size: description: Size is the number of bytes in the file. format: int64 type: integer url: - description: URL is the HTTP address of the Artifact as exposed - by the controller managing the Source. It can be used to retrieve - the Artifact for consumption, e.g. by another controller applying - the Artifact contents. 
+ description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision @@ -1009,43 +757,35 @@ spec: conditions: description: Conditions holds the conditions for the GitRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. 
maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -1060,10 +800,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -1076,18 +812,24 @@ spec: type: object type: array contentConfigChecksum: - description: "ContentConfigChecksum is a checksum of all the configurations - related to the content of the source artifact: - .spec.ignore - - .spec.recurseSubmodules - .spec.included and the checksum of the - included artifacts observed in .status.observedGeneration version - of the object. This can be used to determine if the content of the - included repository has changed. It has the format of `:`, - for example: `sha256:`. \n Deprecated: Replaced with explicit - fields for observed artifact content config in the status." + description: |- + ContentConfigChecksum is a checksum of all the configurations related to + the content of the source artifact: + - .spec.ignore + - .spec.recurseSubmodules + - .spec.included and the checksum of the included artifacts + observed in .status.observedGeneration version of the object. This can + be used to determine if the content of the included repository has + changed. + It has the format of `:`, for example: `sha256:`. + + Deprecated: Replaced with explicit fields for observed artifact content + config in the status. type: string includedArtifacts: - description: IncludedArtifacts contains a list of the last successfully - included Artifacts as instructed by GitRepositorySpec.Include. + description: |- + IncludedArtifacts contains a list of the last successfully included + Artifacts as instructed by GitRepositorySpec.Include. items: description: Artifact represents the output of a Source reconciliation. properties: @@ -1097,8 +839,9 @@ spec: pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of the Artifact. 
+ description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string metadata: @@ -1108,27 +851,28 @@ spec: annotations. type: object path: - description: Path is the relative file path of the Artifact. - It can be used to locate the file in the root of the Artifact - storage on the local file system of the controller managing - the Source. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human-readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string size: description: Size is the number of bytes in the file. format: int64 type: integer url: - description: URL is the HTTP address of the Artifact as exposed - by the controller managing the Source. It can be used to retrieve - the Artifact for consumption, e.g. by another controller applying - the Artifact contents. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision @@ -1136,34 +880,40 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. 
type: string observedGeneration: - description: ObservedGeneration is the last observed generation of - the GitRepository object. + description: |- + ObservedGeneration is the last observed generation of the GitRepository + object. format: int64 type: integer observedIgnore: - description: ObservedIgnore is the observed exclusion patterns used - for constructing the source artifact. + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. type: string observedInclude: - description: ObservedInclude is the observed list of GitRepository - resources used to to produce the current Artifact. + description: |- + ObservedInclude is the observed list of GitRepository resources used to + to produce the current Artifact. items: - description: GitRepositoryInclude specifies a local reference to - a GitRepository which Artifact (sub-)contents must be included, - and where they should be placed. + description: |- + GitRepositoryInclude specifies a local reference to a GitRepository which + Artifact (sub-)contents must be included, and where they should be placed. properties: fromPath: - description: FromPath specifies the path to copy contents from, - defaults to the root of the Artifact. + description: |- + FromPath specifies the path to copy contents from, defaults to the root + of the Artifact. type: string repository: - description: GitRepositoryRef specifies the GitRepository which - Artifact contents must be included. + description: |- + GitRepositoryRef specifies the GitRepository which Artifact contents + must be included. properties: name: description: Name of the referent. @@ -1172,21 +922,24 @@ spec: - name type: object toPath: - description: ToPath specifies the path to copy contents to, - defaults to the name of the GitRepositoryRef. + description: |- + ToPath specifies the path to copy contents to, defaults to the name of + the GitRepositoryRef. 
type: string required: - repository type: object type: array observedRecurseSubmodules: - description: ObservedRecurseSubmodules is the observed resource submodules + description: |- + ObservedRecurseSubmodules is the observed resource submodules configuration used to produce the current Artifact. type: boolean url: - description: URL is the dynamic fetch link for the latest Artifact. - It is provided on a "best effort" basis, and using the precise GitRepositoryStatus.Artifact - data is recommended. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + GitRepositoryStatus.Artifact data is recommended. type: string type: object type: object diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index 49bdcdd93..0e57c72a5 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: helmcharts.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -29,88 +29,79 @@ spec: - jsonPath: .spec.sourceRef.name name: Source Name type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date - jsonPath: .status.conditions[?(@.type=="Ready")].status name: Ready type: string - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 + name: v1 schema: openAPIV3Schema: - description: HelmChart is the Schema for the helmcharts API + description: HelmChart is the Schema for the helmcharts API. 
properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: HelmChartSpec defines the desired state of a Helm chart. + description: HelmChartSpec specifies the desired state of a Helm chart. properties: - accessFrom: - description: AccessFrom defines an Access Control List for allowing - cross-namespace references to this object. - properties: - namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. - items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. 
- properties: - matchLabels: - additionalProperties: - type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. - type: object - type: object - type: array - required: - - namespaceSelectors - type: object chart: - description: The name or path the Helm chart is available at in the + description: |- + Chart is the name or path the Helm chart is available at in the SourceRef. type: string + ignoreMissingValuesFiles: + description: |- + IgnoreMissingValuesFiles controls whether to silently ignore missing values + files rather than failing. + type: boolean interval: - description: The interval at which to check the Source for updates. + description: |- + Interval at which the HelmChart SourceRef is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string reconcileStrategy: default: ChartVersion - description: Determines what enables the creation of a new artifact. - Valid values are ('ChartVersion', 'Revision'). See the documentation - of the values for an explanation on their behavior. Defaults to - ChartVersion when omitted. + description: |- + ReconcileStrategy determines what enables the creation of a new artifact. + Valid values are ('ChartVersion', 'Revision'). + See the documentation of the values for an explanation on their behavior. + Defaults to ChartVersion when omitted. enum: - ChartVersion - Revision type: string sourceRef: - description: The reference to the Source the chart is available at. + description: SourceRef is the reference to the Source the chart is + available at. properties: apiVersion: description: APIVersion of the referent. 
type: string kind: - description: Kind of the referent, valid values are ('HelmRepository', - 'GitRepository', 'Bucket'). + description: |- + Kind of the referent, valid values are ('HelmRepository', 'GitRepository', + 'Bucket'). enum: - HelmRepository - GitRepository @@ -124,28 +115,83 @@ spec: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. + description: |- + Suspend tells the controller to suspend the reconciliation of this + source. type: boolean - valuesFile: - description: Alternative values file to use as the default chart values, - expected to be a relative path in the SourceRef. Deprecated in favor - of ValuesFiles, for backwards compatibility the file defined here - is merged before the ValuesFiles items. Ignored when omitted. - type: string valuesFiles: - description: Alternative list of values files to use as the chart - values (values.yaml is not included by default), expected to be - a relative path in the SourceRef. Values files are merged in the - order of this list with the last file overriding the first. Ignored - when omitted. + description: |- + ValuesFiles is an alternative list of values files to use as the chart + values (values.yaml is not included by default), expected to be a + relative path in the SourceRef. + Values files are merged in the order of this list with the last file + overriding the first. Ignored when omitted. items: type: string type: array + verify: + description: |- + Verify contains the secret name containing the trusted public keys + used to verify the signature and specifies which provider to use to check + whether OCI image is authentic. + This field is only supported when using HelmRepository source with spec.type 'oci'. + Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified. 
+ properties: + matchOIDCIdentity: + description: |- + MatchOIDCIdentity specifies the identity matching criteria to use + while verifying an OCI artifact which was signed using Cosign keyless + signing. The artifact's identity is deemed to be verified if any of the + specified matchers match against the identity. + items: + description: |- + OIDCIdentityMatch specifies options for verifying the certificate identity, + i.e. the issuer and the subject of the certificate. + properties: + issuer: + description: |- + Issuer specifies the regex pattern to match against to verify + the OIDC issuer in the Fulcio certificate. The pattern must be a + valid Go regular expression. + type: string + subject: + description: |- + Subject specifies the regex pattern to match against to verify + the identity subject in the Fulcio certificate. The pattern must + be a valid Go regular expression. + type: string + required: + - issuer + - subject + type: object + type: array + provider: + default: cosign + description: Provider specifies the technology used to sign the + OCI Artifact. + enum: + - cosign + - notation + type: string + secretRef: + description: |- + SecretRef specifies the Kubernetes Secret containing the + trusted public keys. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - provider + type: object version: default: '*' - description: The chart version semver expression, ignored for charts - from GitRepository and Bucket sources. Defaults to latest when omitted. + description: |- + Version is the chart version semver expression, ignored for charts from + GitRepository and Bucket sources. Defaults to latest when omitted. type: string required: - chart @@ -155,75 +201,87 @@ spec: status: default: observedGeneration: -1 - description: HelmChartStatus defines the observed state of the HelmChart. + description: HelmChartStatus records the observed state of the HelmChart. 
properties: artifact: description: Artifact represents the output of the last successful - chart sync. + reconciliation. properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of this artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object path: - description: Path is the relative file path of this artifact. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm index timestamp, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer url: - description: URL is the HTTP address of this artifact. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the HelmChart. 
items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. 
+ description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -238,10 +296,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -254,21 +308,45 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. 
+ type: string + observedChartName: + description: |- + ObservedChartName is the last observed chart name as specified by the + resolved chart reference. type: string observedGeneration: - description: ObservedGeneration is the last observed generation. + description: |- + ObservedGeneration is the last observed generation of the HelmChart + object. format: int64 type: integer + observedSourceArtifactRevision: + description: |- + ObservedSourceArtifactRevision is the last observed Artifact.Revision + of the HelmChartSpec.SourceRef. + type: string + observedValuesFiles: + description: |- + ObservedValuesFiles are the observed value files of the last successful + reconciliation. + It matches the chart in the last successfully reconciled artifact. + items: + type: string + type: array url: - description: URL is the download link for the last chart pulled. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + BucketStatus.Artifact data is recommended. type: string type: object type: object served: true - storage: false + storage: true subresources: status: {} - additionalPrinterColumns: @@ -293,20 +371,27 @@ spec: - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string + deprecated: true + deprecationWarning: v1beta2 HelmChart is deprecated, upgrade to v1 name: v1beta2 schema: openAPIV3Schema: description: HelmChart is the Schema for the helmcharts API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -314,27 +399,27 @@ spec: description: HelmChartSpec specifies the desired state of a Helm chart. properties: accessFrom: - description: 'AccessFrom specifies an Access Control List for allowing - cross-namespace references to this object. NOTE: Not implemented, - provisional as of https://github.com/fluxcd/flux2/pull/2092' + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. 
+ description: |- + NamespaceSelector selects the namespaces to which this ACL applies. + An empty map of MatchLabels matches all namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object type: array @@ -342,21 +427,29 @@ spec: - namespaceSelectors type: object chart: - description: Chart is the name or path the Helm chart is available - at in the SourceRef. + description: |- + Chart is the name or path the Helm chart is available at in the + SourceRef. type: string + ignoreMissingValuesFiles: + description: |- + IgnoreMissingValuesFiles controls whether to silently ignore missing values + files rather than failing. + type: boolean interval: - description: Interval at which the HelmChart SourceRef is checked - for updates. This interval is approximate and may be subject to - jitter to ensure efficient use of resources. + description: |- + Interval at which the HelmChart SourceRef is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string reconcileStrategy: default: ChartVersion - description: ReconcileStrategy determines what enables the creation - of a new artifact. Valid values are ('ChartVersion', 'Revision'). - See the documentation of the values for an explanation on their - behavior. Defaults to ChartVersion when omitted. 
+ description: |- + ReconcileStrategy determines what enables the creation of a new artifact. + Valid values are ('ChartVersion', 'Revision'). + See the documentation of the values for an explanation on their behavior. + Defaults to ChartVersion when omitted. enum: - ChartVersion - Revision @@ -369,8 +462,9 @@ spec: description: APIVersion of the referent. type: string kind: - description: Kind of the referent, valid values are ('HelmRepository', - 'GitRepository', 'Bucket'). + description: |- + Kind of the referent, valid values are ('HelmRepository', 'GitRepository', + 'Bucket'). enum: - HelmRepository - GitRepository @@ -384,53 +478,57 @@ spec: - name type: object suspend: - description: Suspend tells the controller to suspend the reconciliation - of this source. + description: |- + Suspend tells the controller to suspend the reconciliation of this + source. type: boolean valuesFile: - description: ValuesFile is an alternative values file to use as the - default chart values, expected to be a relative path in the SourceRef. - Deprecated in favor of ValuesFiles, for backwards compatibility - the file specified here is merged before the ValuesFiles items. - Ignored when omitted. + description: |- + ValuesFile is an alternative values file to use as the default chart + values, expected to be a relative path in the SourceRef. Deprecated in + favor of ValuesFiles, for backwards compatibility the file specified here + is merged before the ValuesFiles items. Ignored when omitted. type: string valuesFiles: - description: ValuesFiles is an alternative list of values files to - use as the chart values (values.yaml is not included by default), - expected to be a relative path in the SourceRef. Values files are - merged in the order of this list with the last file overriding the - first. Ignored when omitted. 
+ description: |- + ValuesFiles is an alternative list of values files to use as the chart + values (values.yaml is not included by default), expected to be a + relative path in the SourceRef. + Values files are merged in the order of this list with the last file + overriding the first. Ignored when omitted. items: type: string type: array verify: - description: Verify contains the secret name containing the trusted - public keys used to verify the signature and specifies which provider - to use to check whether OCI image is authentic. This field is only - supported when using HelmRepository source with spec.type 'oci'. - Chart dependencies, which are not bundled in the umbrella chart - artifact, are not verified. + description: |- + Verify contains the secret name containing the trusted public keys + used to verify the signature and specifies which provider to use to check + whether OCI image is authentic. + This field is only supported when using HelmRepository source with spec.type 'oci'. + Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified. properties: matchOIDCIdentity: - description: MatchOIDCIdentity specifies the identity matching - criteria to use while verifying an OCI artifact which was signed - using Cosign keyless signing. The artifact's identity is deemed - to be verified if any of the specified matchers match against - the identity. + description: |- + MatchOIDCIdentity specifies the identity matching criteria to use + while verifying an OCI artifact which was signed using Cosign keyless + signing. The artifact's identity is deemed to be verified if any of the + specified matchers match against the identity. items: - description: OIDCIdentityMatch specifies options for verifying - the certificate identity, i.e. the issuer and the subject - of the certificate. + description: |- + OIDCIdentityMatch specifies options for verifying the certificate identity, + i.e. the issuer and the subject of the certificate. 
properties: issuer: - description: Issuer specifies the regex pattern to match - against to verify the OIDC issuer in the Fulcio certificate. - The pattern must be a valid Go regular expression. + description: |- + Issuer specifies the regex pattern to match against to verify + the OIDC issuer in the Fulcio certificate. The pattern must be a + valid Go regular expression. type: string subject: - description: Subject specifies the regex pattern to match - against to verify the identity subject in the Fulcio certificate. - The pattern must be a valid Go regular expression. + description: |- + Subject specifies the regex pattern to match against to verify + the identity subject in the Fulcio certificate. The pattern must + be a valid Go regular expression. type: string required: - issuer @@ -443,10 +541,12 @@ spec: OCI Artifact. enum: - cosign + - notation type: string secretRef: - description: SecretRef specifies the Kubernetes Secret containing - the trusted public keys. + description: |- + SecretRef specifies the Kubernetes Secret containing the + trusted public keys. properties: name: description: Name of the referent. @@ -459,9 +559,9 @@ spec: type: object version: default: '*' - description: Version is the chart version semver expression, ignored - for charts from GitRepository and Bucket sources. Defaults to latest - when omitted. + description: |- + Version is the chart version semver expression, ignored for charts from + GitRepository and Bucket sources. Defaults to latest when omitted. type: string required: - chart @@ -482,8 +582,9 @@ spec: pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of the Artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. 
format: date-time type: string metadata: @@ -492,26 +593,28 @@ spec: description: Metadata holds upstream information such as OCI annotations. type: object path: - description: Path is the relative file path of the Artifact. It - can be used to locate the file in the root of the Artifact storage - on the local file system of the controller managing the Source. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human-readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string size: description: Size is the number of bytes in the file. format: int64 type: integer url: - description: URL is the HTTP address of the Artifact as exposed - by the controller managing the Source. It can be used to retrieve - the Artifact for consumption, e.g. by another controller applying - the Artifact contents. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision @@ -520,43 +623,35 @@ spec: conditions: description: Conditions holds the conditions for the HelmChart. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. 
// Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -571,10 +666,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -587,31 +678,44 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedChartName: - description: ObservedChartName is the last observed chart name as - specified by the resolved chart reference. + description: |- + ObservedChartName is the last observed chart name as specified by the + resolved chart reference. 
type: string observedGeneration: - description: ObservedGeneration is the last observed generation of - the HelmChart object. + description: |- + ObservedGeneration is the last observed generation of the HelmChart + object. format: int64 type: integer observedSourceArtifactRevision: - description: ObservedSourceArtifactRevision is the last observed Artifact.Revision + description: |- + ObservedSourceArtifactRevision is the last observed Artifact.Revision of the HelmChartSpec.SourceRef. type: string + observedValuesFiles: + description: |- + ObservedValuesFiles are the observed value files of the last successful + reconciliation. + It matches the chart in the last successfully reconciled artifact. + items: + type: string + type: array url: - description: URL is the dynamic fetch link for the latest Artifact. - It is provided on a "best effort" basis, and using the precise BucketStatus.Artifact - data is recommended. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + BucketStatus.Artifact data is recommended. 
type: string type: object type: object served: true - storage: true + storage: false subresources: status: {} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml index 7eb709b94..750a36500 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: helmrepositories.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -20,78 +20,135 @@ spec: - jsonPath: .spec.url name: URL type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date - jsonPath: .status.conditions[?(@.type=="Ready")].status name: Ready type: string - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 + name: v1 schema: openAPIV3Schema: - description: HelmRepository is the Schema for the helmrepositories API + description: HelmRepository is the Schema for the helmrepositories API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: HelmRepositorySpec defines the reference to a Helm repository. + description: |- + HelmRepositorySpec specifies the required configuration to produce an + Artifact for a Helm repository index YAML. properties: accessFrom: - description: AccessFrom defines an Access Control List for allowing - cross-namespace references to this object. + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. 
+ An empty map of MatchLabels matches all namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object type: array required: - namespaceSelectors type: object + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + registry. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + It takes precedence over the values specified in the Secret referred + to by `.spec.secretRef`. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + insecure: + description: |- + Insecure allows connecting to a non-TLS HTTP container registry. + This field is only taken into account if the .spec.type field is set to 'oci'. + type: boolean interval: - description: The interval at which to check the upstream for updates. + description: |- + Interval at which the HelmRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. 
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string passCredentials: - description: PassCredentials allows the credentials from the SecretRef - to be passed on to a host that does not match the host as defined - in URL. This may be required if the host of the advertised chart - URLs in the index differ from the defined URL. Enabling this should - be done with caution, as it can potentially result in credentials - getting stolen in a MITM-attack. + description: |- + PassCredentials allows the credentials from the SecretRef to be passed + on to a host that does not match the host as defined in URL. + This may be required if the host of the advertised chart URLs in the + index differ from the defined URL. + Enabling this should be done with caution, as it can potentially result + in credentials getting stolen in a MITM-attack. type: boolean + provider: + default: generic + description: |- + Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + This field is optional, and only taken into account if the .spec.type field is set to 'oci'. + When not specified, defaults to 'generic'. + enum: + - generic + - aws + - azure + - gcp + type: string secretRef: - description: The name of the secret containing authentication credentials - for the Helm repository. For HTTP/S basic auth the secret must contain - username and password fields. For TLS the secret must contain a - certFile and keyFile, and/or caFile fields. + description: |- + SecretRef specifies the Secret containing authentication credentials + for the HelmRepository. + For HTTP/S basic auth the secret must contain 'username' and 'password' + fields. + Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile' + keys is deprecated. Please use `.spec.certSecretRef` instead. properties: name: description: Name of the referent. @@ -100,93 +157,119 @@ spec: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. 
+ description: |- + Suspend tells the controller to suspend the reconciliation of this + HelmRepository. type: boolean timeout: - default: 60s - description: The timeout of index downloading, defaults to 60s. + description: |- + Timeout is used for the index fetch operation for an HTTPS helm repository, + and for remote OCI Repository operations like pulling for an OCI helm + chart by the associated HelmChart. + Its default value is 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ + type: string + type: + description: |- + Type of the HelmRepository. + When this field is set to "oci", the URL field value must be prefixed with "oci://". + enum: + - default + - oci type: string url: - description: The Helm repository URL, a valid URL contains at least - a protocol and host. + description: |- + URL of the Helm repository, a valid URL contains at least a protocol and + host. + pattern: ^(http|https|oci)://.*$ type: string required: - - interval - url type: object status: default: observedGeneration: -1 - description: HelmRepositoryStatus defines the observed state of the HelmRepository. + description: HelmRepositoryStatus records the observed state of the HelmRepository. properties: artifact: - description: Artifact represents the output of the last successful - repository sync. + description: Artifact represents the last successful HelmRepository + reconciliation. properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of this artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. 
+ type: object path: - description: Path is the relative file path of this artifact. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm index timestamp, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer url: - description: URL is the HTTP address of this artifact. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest + - lastUpdateTime - path + - revision - url type: object conditions: description: Conditions holds the conditions for the HelmRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. 
properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -201,10 +284,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -217,21 +296,27 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: - description: ObservedGeneration is the last observed generation. + description: |- + ObservedGeneration is the last observed generation of the HelmRepository + object. format: int64 type: integer url: - description: URL is the download link for the last index fetched. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + HelmRepositoryStatus.Artifact data is recommended. 
type: string type: object type: object served: true - storage: false + storage: true subresources: status: {} - additionalPrinterColumns: @@ -247,49 +332,57 @@ spec: - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string + deprecated: true + deprecationWarning: v1beta2 HelmRepository is deprecated, upgrade to v1 name: v1beta2 schema: openAPIV3Schema: description: HelmRepository is the Schema for the helmrepositories API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: HelmRepositorySpec specifies the required configuration to - produce an Artifact for a Helm repository index YAML. 
+ description: |- + HelmRepositorySpec specifies the required configuration to produce an + Artifact for a Helm repository index YAML. properties: accessFrom: - description: 'AccessFrom specifies an Access Control List for allowing - cross-namespace references to this object. NOTE: Not implemented, - provisional as of https://github.com/fluxcd/flux2/pull/2092' + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors - to which this ACL applies. Items in this list are evaluated - using a logical OR operation. + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which - this ACL applies. An empty map of MatchLabels matches all - namespaces in a cluster. + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. + An empty map of MatchLabels matches all namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object type: object type: array @@ -297,15 +390,22 @@ spec: - namespaceSelectors type: object certSecretRef: - description: "CertSecretRef can be given the name of a Secret containing - either or both of \n - a PEM-encoded client certificate (`tls.crt`) - and private key (`tls.key`); - a PEM-encoded CA certificate (`ca.crt`) - \n and whichever are supplied, will be used for connecting to the - registry. The client cert and key are useful if you are authenticating - with a certificate; the CA cert is useful if you are using a self-signed - server certificate. The Secret must be of type `Opaque` or `kubernetes.io/tls`. - \n It takes precedence over the values specified in the Secret referred - to by `.spec.secretRef`." + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + registry. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + It takes precedence over the values specified in the Secret referred + to by `.spec.secretRef`. properties: name: description: Name of the referent. @@ -314,30 +414,32 @@ spec: - name type: object insecure: - description: Insecure allows connecting to a non-TLS HTTP container - registry. This field is only taken into account if the .spec.type - field is set to 'oci'. + description: |- + Insecure allows connecting to a non-TLS HTTP container registry. + This field is only taken into account if the .spec.type field is set to 'oci'. type: boolean interval: - description: Interval at which the HelmRepository URL is checked for - updates. 
This interval is approximate and may be subject to jitter - to ensure efficient use of resources. + description: |- + Interval at which the HelmRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string passCredentials: - description: PassCredentials allows the credentials from the SecretRef - to be passed on to a host that does not match the host as defined - in URL. This may be required if the host of the advertised chart - URLs in the index differ from the defined URL. Enabling this should - be done with caution, as it can potentially result in credentials - getting stolen in a MITM-attack. + description: |- + PassCredentials allows the credentials from the SecretRef to be passed + on to a host that does not match the host as defined in URL. + This may be required if the host of the advertised chart URLs in the + index differ from the defined URL. + Enabling this should be done with caution, as it can potentially result + in credentials getting stolen in a MITM-attack. type: boolean provider: default: generic - description: Provider used for authentication, can be 'aws', 'azure', - 'gcp' or 'generic'. This field is optional, and only taken into - account if the .spec.type field is set to 'oci'. When not specified, - defaults to 'generic'. + description: |- + Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + This field is optional, and only taken into account if the .spec.type field is set to 'oci'. + When not specified, defaults to 'generic'. enum: - generic - aws @@ -345,11 +447,13 @@ spec: - gcp type: string secretRef: - description: SecretRef specifies the Secret containing authentication - credentials for the HelmRepository. For HTTP/S basic auth the secret - must contain 'username' and 'password' fields. 
Support for TLS auth - using the 'certFile' and 'keyFile', and/or 'caFile' keys is deprecated. - Please use `.spec.certSecretRef` instead. + description: |- + SecretRef specifies the Secret containing authentication credentials + for the HelmRepository. + For HTTP/S basic auth the secret must contain 'username' and 'password' + fields. + Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile' + keys is deprecated. Please use `.spec.certSecretRef` instead. properties: name: description: Name of the referent. @@ -358,26 +462,30 @@ spec: - name type: object suspend: - description: Suspend tells the controller to suspend the reconciliation - of this HelmRepository. + description: |- + Suspend tells the controller to suspend the reconciliation of this + HelmRepository. type: boolean timeout: - description: Timeout is used for the index fetch operation for an - HTTPS helm repository, and for remote OCI Repository operations - like pulling for an OCI helm chart by the associated HelmChart. + description: |- + Timeout is used for the index fetch operation for an HTTPS helm repository, + and for remote OCI Repository operations like pulling for an OCI helm + chart by the associated HelmChart. Its default value is 60s. pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ type: string type: - description: Type of the HelmRepository. When this field is set to "oci", - the URL field value must be prefixed with "oci://". + description: |- + Type of the HelmRepository. + When this field is set to "oci", the URL field value must be prefixed with "oci://". enum: - default - oci type: string url: - description: URL of the Helm repository, a valid URL contains at least - a protocol and host. + description: |- + URL of the Helm repository, a valid URL contains at least a protocol and + host. 
pattern: ^(http|https|oci)://.*$ type: string required: @@ -397,8 +505,9 @@ spec: pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of the Artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string metadata: @@ -407,26 +516,28 @@ spec: description: Metadata holds upstream information such as OCI annotations. type: object path: - description: Path is the relative file path of the Artifact. It - can be used to locate the file in the root of the Artifact storage - on the local file system of the controller managing the Source. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human-readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string size: description: Size is the number of bytes in the file. format: int64 type: integer url: - description: URL is the HTTP address of the Artifact as exposed - by the controller managing the Source. It can be used to retrieve - the Artifact for consumption, e.g. by another controller applying - the Artifact contents. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. 
type: string required: + - digest - lastUpdateTime - path - revision @@ -435,43 +546,35 @@ spec: conditions: description: Conditions holds the conditions for the HelmRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. 
For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -486,10 +589,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -502,23 +601,26 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: - description: ObservedGeneration is the last observed generation of - the HelmRepository object. + description: |- + ObservedGeneration is the last observed generation of the HelmRepository + object. format: int64 type: integer url: - description: URL is the dynamic fetch link for the latest Artifact. - It is provided on a "best effort" basis, and using the precise HelmRepositoryStatus.Artifact - data is recommended. + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + HelmRepositoryStatus.Artifact data is recommended. 
type: string type: object type: object served: true - storage: true + storage: false subresources: status: {} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml index b795c8fda..05b7b96ab 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.19.0 name: ocirepositories.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -29,20 +29,420 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date + name: v1 + schema: + openAPIV3Schema: + description: OCIRepository is the Schema for the ocirepositories API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OCIRepositorySpec defines the desired state of OCIRepository + properties: + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + registry. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + ignore: + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. + type: string + insecure: + description: Insecure allows connecting to a non-TLS HTTP container + registry. + type: boolean + interval: + description: |- + Interval at which the OCIRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + layerSelector: + description: |- + LayerSelector specifies which layer should be extracted from the OCI artifact. + When not specified, the first layer found in the artifact is selected. + properties: + mediaType: + description: |- + MediaType specifies the OCI media type of the layer + which should be extracted from the OCI Artifact. The + first layer matching this type is selected. 
+ type: string + operation: + description: |- + Operation specifies how the selected layer should be processed. + By default, the layer compressed content is extracted to storage. + When the operation is set to 'copy', the layer compressed content + is persisted to storage as it is. + enum: + - extract + - copy + type: string + type: object + provider: + default: generic + description: |- + The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + When not specified, defaults to 'generic'. + enum: + - generic + - aws + - azure + - gcp + type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the container registry. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + ref: + description: |- + The OCI reference to pull and monitor for changes, + defaults to the latest tag. + properties: + digest: + description: |- + Digest is the image digest to pull, takes precedence over SemVer. + The value should be in the format 'sha256:'. + type: string + semver: + description: |- + SemVer is the range of tags to pull selecting the latest within + the range, takes precedence over Tag. + type: string + semverFilter: + description: SemverFilter is a regex pattern to filter the tags + within the SemVer range. + type: string + tag: + description: Tag is the image tag to pull, defaults to latest. + type: string + type: object + secretRef: + description: |- + SecretRef contains the secret name containing the registry login + credentials to resolve image metadata. + The secret must be of type kubernetes.io/dockerconfigjson. + properties: + name: + description: Name of the referent. 
+ type: string + required: + - name + type: object + serviceAccountName: + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + the image pull if the service account has attached pull secrets. For more information: + https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account + type: string + suspend: + description: This flag tells the controller to suspend the reconciliation + of this source. + type: boolean + timeout: + default: 60s + description: The timeout for remote OCI Repository operations like + pulling, defaults to 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ + type: string + url: + description: |- + URL is a reference to an OCI artifact repository hosted + on a remote container registry. + pattern: ^oci://.*$ + type: string + verify: + description: |- + Verify contains the secret name containing the trusted public keys + used to verify the signature and specifies which provider to use to check + whether OCI image is authentic. + properties: + matchOIDCIdentity: + description: |- + MatchOIDCIdentity specifies the identity matching criteria to use + while verifying an OCI artifact which was signed using Cosign keyless + signing. The artifact's identity is deemed to be verified if any of the + specified matchers match against the identity. + items: + description: |- + OIDCIdentityMatch specifies options for verifying the certificate identity, + i.e. the issuer and the subject of the certificate. + properties: + issuer: + description: |- + Issuer specifies the regex pattern to match against to verify + the OIDC issuer in the Fulcio certificate. The pattern must be a + valid Go regular expression. + type: string + subject: + description: |- + Subject specifies the regex pattern to match against to verify + the identity subject in the Fulcio certificate. The pattern must + be a valid Go regular expression. 
+ type: string + required: + - issuer + - subject + type: object + type: array + provider: + default: cosign + description: Provider specifies the technology used to sign the + OCI Artifact. + enum: + - cosign + - notation + type: string + secretRef: + description: |- + SecretRef specifies the Kubernetes Secret containing the + trusted public keys. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - provider + type: object + required: + - interval + - url + type: object + status: + default: + observedGeneration: -1 + description: OCIRepositoryStatus defines the observed state of OCIRepository + properties: + artifact: + description: Artifact represents the output of the last successful + OCI Repository sync. + properties: + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. + format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. 
by another controller applying the Artifact contents. + type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + conditions: + description: Conditions holds the conditions for the OCIRepository. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastHandledReconcileAt: + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. + type: string + observedLayerSelector: + description: |- + ObservedLayerSelector is the observed layer selector used for constructing + the source artifact. + properties: + mediaType: + description: |- + MediaType specifies the OCI media type of the layer + which should be extracted from the OCI Artifact. The + first layer matching this type is selected. + type: string + operation: + description: |- + Operation specifies how the selected layer should be processed. + By default, the layer compressed content is extracted to storage. + When the operation is set to 'copy', the layer compressed content + is persisted to storage as it is. + enum: + - extract + - copy + type: string + type: object + url: + description: URL is the download link for the artifact output of the + last OCI Repository sync. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.url + name: URL + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + deprecated: true + deprecationWarning: v1beta2 OCIRepository is deprecated, upgrade to v1 name: v1beta2 schema: openAPIV3Schema: description: OCIRepository is the Schema for the ocirepositories API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -50,15 +450,22 @@ spec: description: OCIRepositorySpec defines the desired state of OCIRepository properties: certSecretRef: - description: "CertSecretRef can be given the name of a Secret containing - either or both of \n - a PEM-encoded client certificate (`tls.crt`) - and private key (`tls.key`); - a PEM-encoded CA certificate (`ca.crt`) - \n and whichever are supplied, will be used for connecting to the - registry. The client cert and key are useful if you are authenticating - with a certificate; the CA cert is useful if you are using a self-signed - server certificate. The Secret must be of type `Opaque` or `kubernetes.io/tls`. - \n Note: Support for the `caFile`, `certFile` and `keyFile` keys - have been deprecated." + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + registry. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + Note: Support for the `caFile`, `certFile` and `keyFile` keys have + been deprecated. properties: name: description: Name of the referent. @@ -67,36 +474,39 @@ spec: - name type: object ignore: - description: Ignore overrides the set of excluded patterns in the - .sourceignore format (which is the same as .gitignore). If not provided, - a default will be used, consult the documentation for your version - to find out what those are. + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). 
If not provided, a default will be used, + consult the documentation for your version to find out what those are. type: string insecure: description: Insecure allows connecting to a non-TLS HTTP container registry. type: boolean interval: - description: Interval at which the OCIRepository URL is checked for - updates. This interval is approximate and may be subject to jitter - to ensure efficient use of resources. + description: |- + Interval at which the OCIRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ type: string layerSelector: - description: LayerSelector specifies which layer should be extracted - from the OCI artifact. When not specified, the first layer found - in the artifact is selected. + description: |- + LayerSelector specifies which layer should be extracted from the OCI artifact. + When not specified, the first layer found in the artifact is selected. properties: mediaType: - description: MediaType specifies the OCI media type of the layer - which should be extracted from the OCI Artifact. The first layer - matching this type is selected. + description: |- + MediaType specifies the OCI media type of the layer + which should be extracted from the OCI Artifact. The + first layer matching this type is selected. type: string operation: - description: Operation specifies how the selected layer should - be processed. By default, the layer compressed content is extracted - to storage. When the operation is set to 'copy', the layer compressed - content is persisted to storage as it is. + description: |- + Operation specifies how the selected layer should be processed. + By default, the layer compressed content is extracted to storage. + When the operation is set to 'copy', the layer compressed content + is persisted to storage as it is. 
enum: - extract - copy @@ -104,34 +514,54 @@ spec: type: object provider: default: generic - description: The provider used for authentication, can be 'aws', 'azure', - 'gcp' or 'generic'. When not specified, defaults to 'generic'. + description: |- + The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + When not specified, defaults to 'generic'. enum: - generic - aws - azure - gcp type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the container registry. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object ref: - description: The OCI reference to pull and monitor for changes, defaults - to the latest tag. + description: |- + The OCI reference to pull and monitor for changes, + defaults to the latest tag. properties: digest: - description: Digest is the image digest to pull, takes precedence - over SemVer. The value should be in the format 'sha256:'. + description: |- + Digest is the image digest to pull, takes precedence over SemVer. + The value should be in the format 'sha256:'. type: string semver: - description: SemVer is the range of tags to pull selecting the - latest within the range, takes precedence over Tag. + description: |- + SemVer is the range of tags to pull selecting the latest within + the range, takes precedence over Tag. + type: string + semverFilter: + description: SemverFilter is a regex pattern to filter the tags + within the SemVer range. type: string tag: description: Tag is the image tag to pull, defaults to latest. type: string type: object secretRef: - description: SecretRef contains the secret name containing the registry - login credentials to resolve image metadata. The secret must be - of type kubernetes.io/dockerconfigjson. 
+ description: |- + SecretRef contains the secret name containing the registry login + credentials to resolve image metadata. + The secret must be of type kubernetes.io/dockerconfigjson. properties: name: description: Name of the referent. @@ -140,9 +570,10 @@ spec: - name type: object serviceAccountName: - description: 'ServiceAccountName is the name of the Kubernetes ServiceAccount - used to authenticate the image pull if the service account has attached - pull secrets. For more information: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account' + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + the image pull if the service account has attached pull secrets. For more information: + https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account type: string suspend: description: This flag tells the controller to suspend the reconciliation @@ -155,35 +586,39 @@ spec: pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ type: string url: - description: URL is a reference to an OCI artifact repository hosted + description: |- + URL is a reference to an OCI artifact repository hosted on a remote container registry. pattern: ^oci://.*$ type: string verify: - description: Verify contains the secret name containing the trusted - public keys used to verify the signature and specifies which provider - to use to check whether OCI image is authentic. + description: |- + Verify contains the secret name containing the trusted public keys + used to verify the signature and specifies which provider to use to check + whether OCI image is authentic. properties: matchOIDCIdentity: - description: MatchOIDCIdentity specifies the identity matching - criteria to use while verifying an OCI artifact which was signed - using Cosign keyless signing. 
The artifact's identity is deemed - to be verified if any of the specified matchers match against - the identity. + description: |- + MatchOIDCIdentity specifies the identity matching criteria to use + while verifying an OCI artifact which was signed using Cosign keyless + signing. The artifact's identity is deemed to be verified if any of the + specified matchers match against the identity. items: - description: OIDCIdentityMatch specifies options for verifying - the certificate identity, i.e. the issuer and the subject - of the certificate. + description: |- + OIDCIdentityMatch specifies options for verifying the certificate identity, + i.e. the issuer and the subject of the certificate. properties: issuer: - description: Issuer specifies the regex pattern to match - against to verify the OIDC issuer in the Fulcio certificate. - The pattern must be a valid Go regular expression. + description: |- + Issuer specifies the regex pattern to match against to verify + the OIDC issuer in the Fulcio certificate. The pattern must be a + valid Go regular expression. type: string subject: - description: Subject specifies the regex pattern to match - against to verify the identity subject in the Fulcio certificate. - The pattern must be a valid Go regular expression. + description: |- + Subject specifies the regex pattern to match against to verify + the identity subject in the Fulcio certificate. The pattern must + be a valid Go regular expression. type: string required: - issuer @@ -196,10 +631,12 @@ spec: OCI Artifact. enum: - cosign + - notation type: string secretRef: - description: SecretRef specifies the Kubernetes Secret containing - the trusted public keys. + description: |- + SecretRef specifies the Kubernetes Secret containing the + trusted public keys. properties: name: description: Name of the referent. 
@@ -228,8 +665,9 @@ spec: pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to - the last update of the Artifact. + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. format: date-time type: string metadata: @@ -238,26 +676,28 @@ spec: description: Metadata holds upstream information such as OCI annotations. type: object path: - description: Path is the relative file path of the Artifact. It - can be used to locate the file in the root of the Artifact storage - on the local file system of the controller managing the Source. + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. type: string revision: - description: Revision is a human-readable identifier traceable - in the origin source system. It can be a Git commit SHA, Git - tag, a Helm chart version, etc. + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. type: string size: description: Size is the number of bytes in the file. format: int64 type: integer url: - description: URL is the HTTP address of the Artifact as exposed - by the controller managing the Source. It can be used to retrieve - the Artifact for consumption, e.g. by another controller applying - the Artifact contents. + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision @@ -266,43 +706,35 @@ spec: conditions: description: Conditions holds the conditions for the OCIRepository. 
items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. 
+ description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -317,10 +749,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -333,42 +761,51 @@ spec: type: object type: array contentConfigChecksum: - description: "ContentConfigChecksum is a checksum of all the configurations - related to the content of the source artifact: - .spec.ignore - - .spec.layerSelector observed in .status.observedGeneration version - of the object. 
This can be used to determine if the content configuration - has changed and the artifact needs to be rebuilt. It has the format - of `:`, for example: `sha256:`. \n Deprecated: - Replaced with explicit fields for observed artifact content config - in the status." + description: |- + ContentConfigChecksum is a checksum of all the configurations related to + the content of the source artifact: + - .spec.ignore + - .spec.layerSelector + observed in .status.observedGeneration version of the object. This can + be used to determine if the content configuration has changed and the + artifact needs to be rebuilt. + It has the format of `:`, for example: `sha256:`. + + Deprecated: Replaced with explicit fields for observed artifact content + config in the status. type: string lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value can - be detected. + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. format: int64 type: integer observedIgnore: - description: ObservedIgnore is the observed exclusion patterns used - for constructing the source artifact. + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. type: string observedLayerSelector: - description: ObservedLayerSelector is the observed layer selector - used for constructing the source artifact. + description: |- + ObservedLayerSelector is the observed layer selector used for constructing + the source artifact. properties: mediaType: - description: MediaType specifies the OCI media type of the layer - which should be extracted from the OCI Artifact. The first layer - matching this type is selected. 
+ description: |- + MediaType specifies the OCI media type of the layer + which should be extracted from the OCI Artifact. The + first layer matching this type is selected. type: string operation: - description: Operation specifies how the selected layer should - be processed. By default, the layer compressed content is extracted - to storage. When the operation is set to 'copy', the layer compressed - content is persisted to storage as it is. + description: |- + Operation specifies how the selected layer should be processed. + By default, the layer compressed content is extracted to storage. + When the operation is set to 'copy', the layer compressed content + is persisted to storage as it is. enum: - extract - copy @@ -381,6 +818,6 @@ spec: type: object type: object served: true - storage: true + storage: false subresources: status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index c00716353..2a09dbfd5 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -6,4 +6,5 @@ resources: - bases/source.toolkit.fluxcd.io_helmcharts.yaml - bases/source.toolkit.fluxcd.io_buckets.yaml - bases/source.toolkit.fluxcd.io_ocirepositories.yaml +- bases/source.toolkit.fluxcd.io_externalartifacts.yaml # +kubebuilder:scaffold:crdkustomizeresource diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index f8c76696c..0118ce85b 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -6,4 +6,4 @@ resources: images: - name: fluxcd/source-controller newName: fluxcd/source-controller - newTag: v1.2.0 + newTag: v1.7.0 diff --git a/config/rbac/externalartifact_editor_role.yaml b/config/rbac/externalartifact_editor_role.yaml new file mode 100644 index 000000000..ded6c1d93 --- /dev/null +++ b/config/rbac/externalartifact_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit externalartifacts. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: externalartifact-editor-role +rules: +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts/status + verbs: + - get diff --git a/config/rbac/externalartifact_viewer_role.yaml b/config/rbac/externalartifact_viewer_role.yaml new file mode 100644 index 000000000..d0c1d507f --- /dev/null +++ b/config/rbac/externalartifact_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view externalartifacts. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: externalartifacts-viewer-role +rules: +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts + verbs: + - get + - list + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 8bd710bef..d2cd9e7cb 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -15,133 +15,24 @@ rules: - "" resources: - secrets + - serviceaccounts verbs: - get - list - watch - apiGroups: - - source.toolkit.fluxcd.io + - "" resources: - - buckets + - serviceaccounts/token verbs: - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - buckets/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - buckets/status - verbs: - - get - - patch - - update - apiGroups: - source.toolkit.fluxcd.io resources: + - buckets - gitrepositories - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - gitrepositories/finalizers - verbs: - - create - - delete - - get - - patch - - update -- 
apiGroups: - - source.toolkit.fluxcd.io - resources: - - gitrepositories/status - verbs: - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - helmcharts - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmcharts/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmcharts/status - verbs: - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - helmrepositories - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmrepositories/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmrepositories/status - verbs: - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - ocirepositories verbs: - create @@ -154,6 +45,10 @@ rules: - apiGroups: - source.toolkit.fluxcd.io resources: + - buckets/finalizers + - gitrepositories/finalizers + - helmcharts/finalizers + - helmrepositories/finalizers - ocirepositories/finalizers verbs: - create @@ -164,6 +59,10 @@ rules: - apiGroups: - source.toolkit.fluxcd.io resources: + - buckets/status + - gitrepositories/status + - helmcharts/status + - helmrepositories/status - ocirepositories/status verbs: - get diff --git a/config/samples/source_v1beta2_bucket.yaml b/config/samples/source_v1_bucket.yaml similarity index 81% rename from config/samples/source_v1beta2_bucket.yaml rename to config/samples/source_v1_bucket.yaml index cbc211aa6..f09cbe213 100644 --- a/config/samples/source_v1beta2_bucket.yaml +++ b/config/samples/source_v1_bucket.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: name: bucket-sample diff --git 
a/config/samples/source_v1beta2_gitrepository.yaml b/config/samples/source_v1_gitrepository.yaml similarity index 100% rename from config/samples/source_v1beta2_gitrepository.yaml rename to config/samples/source_v1_gitrepository.yaml diff --git a/config/samples/source_v1beta2_helmchart_gitrepository.yaml b/config/samples/source_v1_helmchart_gitrepository.yaml similarity index 78% rename from config/samples/source_v1beta2_helmchart_gitrepository.yaml rename to config/samples/source_v1_helmchart_gitrepository.yaml index 731d8d21b..680e7b184 100644 --- a/config/samples/source_v1beta2_helmchart_gitrepository.yaml +++ b/config/samples/source_v1_helmchart_gitrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: helmchart-git-sample diff --git a/config/samples/source_v1beta2_helmchart_helmrepository-oci.yaml b/config/samples/source_v1_helmchart_helmrepository-oci.yaml similarity index 82% rename from config/samples/source_v1beta2_helmchart_helmrepository-oci.yaml rename to config/samples/source_v1_helmchart_helmrepository-oci.yaml index d2cdc15c6..d9dd3279d 100644 --- a/config/samples/source_v1beta2_helmchart_helmrepository-oci.yaml +++ b/config/samples/source_v1_helmchart_helmrepository-oci.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: helmchart-sample-oci diff --git a/config/samples/source_v1beta2_helmchart_helmrepository.yaml b/config/samples/source_v1_helmchart_helmrepository.yaml similarity index 63% rename from config/samples/source_v1beta2_helmchart_helmrepository.yaml rename to config/samples/source_v1_helmchart_helmrepository.yaml index a6bd7c207..d1b43fe3e 100644 --- a/config/samples/source_v1beta2_helmchart_helmrepository.yaml +++ b/config/samples/source_v1_helmchart_helmrepository.yaml @@ -1,11 +1,12 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: 
source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: helmchart-sample spec: chart: podinfo - version: '>=2.0.0 <3.0.0' + version: '6.x' sourceRef: kind: HelmRepository name: helmrepository-sample interval: 1m + ignoreMissingValuesFiles: true diff --git a/config/samples/source_v1beta2_helmrepository-oci.yaml b/config/samples/source_v1_helmrepository-oci.yaml similarity index 72% rename from config/samples/source_v1beta2_helmrepository-oci.yaml rename to config/samples/source_v1_helmrepository-oci.yaml index bc487c990..458dc73c2 100644 --- a/config/samples/source_v1beta2_helmrepository-oci.yaml +++ b/config/samples/source_v1_helmrepository-oci.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: helmrepository-sample-oci diff --git a/config/samples/source_v1beta2_helmrepository.yaml b/config/samples/source_v1_helmrepository.yaml similarity index 73% rename from config/samples/source_v1beta2_helmrepository.yaml rename to config/samples/source_v1_helmrepository.yaml index 4a2c7ab36..b7049cc0a 100644 --- a/config/samples/source_v1beta2_helmrepository.yaml +++ b/config/samples/source_v1_helmrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: helmrepository-sample diff --git a/config/samples/source_v1beta2_ocirepository.yaml b/config/samples/source_v1_ocirepository.yaml similarity index 77% rename from config/samples/source_v1beta2_ocirepository.yaml rename to config/samples/source_v1_ocirepository.yaml index e06241b97..69fb19e2a 100644 --- a/config/samples/source_v1beta2_ocirepository.yaml +++ b/config/samples/source_v1_ocirepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: OCIRepository metadata: name: ocirepository-sample diff --git a/config/testdata/bucket/source.yaml 
b/config/testdata/bucket/source.yaml index 459e7400a..bd3097ee2 100644 --- a/config/testdata/bucket/source.yaml +++ b/config/testdata/bucket/source.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: name: podinfo diff --git a/config/testdata/git/large-repo.yaml b/config/testdata/git/large-repo.yaml index 003784fa0..ad3defd68 100644 --- a/config/testdata/git/large-repo.yaml +++ b/config/testdata/git/large-repo.yaml @@ -1,13 +1,10 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: GitRepository metadata: name: large-repo spec: interval: 10m timeout: 2m - url: https://github.com/hashgraph/hedera-mirror-node.git + url: https://github.com/nodejs/node.git ref: branch: main - ignore: | - /* - !/charts diff --git a/config/testdata/helmchart-from-bucket/source.yaml b/config/testdata/helmchart-from-bucket/source.yaml index 0609cf541..814305d13 100644 --- a/config/testdata/helmchart-from-bucket/source.yaml +++ b/config/testdata/helmchart-from-bucket/source.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: name: charts @@ -13,7 +13,7 @@ spec: secretRef: name: minio-credentials --- -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: helmchart-bucket diff --git a/config/testdata/helmchart-from-oci/notation.yaml b/config/testdata/helmchart-from-oci/notation.yaml new file mode 100644 index 000000000..6434479ea --- /dev/null +++ b/config/testdata/helmchart-from-oci/notation.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo-notation +spec: + url: oci://ghcr.io/stefanprodan/charts + type: "oci" + interval: 1m +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo-notation +spec: + 
chart: podinfo + sourceRef: + kind: HelmRepository + name: podinfo-notation + version: '6.6.0' + interval: 1m + verify: + provider: notation + secretRef: + name: notation-config diff --git a/config/testdata/helmchart-from-oci/source.yaml b/config/testdata/helmchart-from-oci/source.yaml index 354325efa..b2786531e 100644 --- a/config/testdata/helmchart-from-oci/source.yaml +++ b/config/testdata/helmchart-from-oci/source.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: podinfo @@ -8,7 +8,7 @@ spec: type: "oci" interval: 1m --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: podinfo @@ -20,7 +20,7 @@ spec: version: '6.1.*' interval: 1m --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: podinfo-keyless diff --git a/config/testdata/helmchart-valuesfile/gitrepository.yaml b/config/testdata/helmchart-valuesfile/gitrepository.yaml index b620c8560..279979e93 100644 --- a/config/testdata/helmchart-valuesfile/gitrepository.yaml +++ b/config/testdata/helmchart-valuesfile/gitrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: GitRepository metadata: name: podinfo diff --git a/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml b/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml index 4483f0ca8..3c26b3eb5 100644 --- a/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml +++ b/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: podinfo-git @@ -8,6 +8,5 @@ spec: kind: GitRepository name: podinfo chart: charts/podinfo - valuesFile: charts/podinfo/values.yaml valuesFiles: - 
charts/podinfo/values-prod.yaml diff --git a/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml b/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml index fdf34f6bf..0b004eb7a 100644 --- a/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml +++ b/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: podinfo @@ -8,6 +8,5 @@ spec: kind: HelmRepository name: podinfo chart: podinfo - valuesFile: values.yaml valuesFiles: - values-prod.yaml diff --git a/config/testdata/helmchart-valuesfile/helmrepository.yaml b/config/testdata/helmchart-valuesfile/helmrepository.yaml index ab568384c..f0c178695 100644 --- a/config/testdata/helmchart-valuesfile/helmrepository.yaml +++ b/config/testdata/helmchart-valuesfile/helmrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: podinfo diff --git a/config/testdata/ocirepository/signed-with-key.yaml b/config/testdata/ocirepository/signed-with-key.yaml index 7a2bd3c2c..0a3a652ee 100644 --- a/config/testdata/ocirepository/signed-with-key.yaml +++ b/config/testdata/ocirepository/signed-with-key.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: OCIRepository metadata: name: podinfo-deploy-signed-with-key diff --git a/config/testdata/ocirepository/signed-with-keyless.yaml b/config/testdata/ocirepository/signed-with-keyless.yaml index efb02fc28..ff46ed30d 100644 --- a/config/testdata/ocirepository/signed-with-keyless.yaml +++ b/config/testdata/ocirepository/signed-with-keyless.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: OCIRepository metadata: name: podinfo-deploy-signed-with-keyless diff --git 
a/config/testdata/ocirepository/signed-with-notation.yaml b/config/testdata/ocirepository/signed-with-notation.yaml new file mode 100644 index 000000000..55820f6d4 --- /dev/null +++ b/config/testdata/ocirepository/signed-with-notation.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo-deploy-signed-with-notation +spec: + interval: 5m + url: oci://ghcr.io/stefanprodan/podinfo-deploy + ref: + semver: "6.6.x" + verify: + provider: notation + secretRef: + name: notation-config diff --git a/docs/api/v1/source.md b/docs/api/v1/source.md index ff34c7e60..935d74275 100644 --- a/docs/api/v1/source.md +++ b/docs/api/v1/source.md @@ -9,11 +9,19 @@

Package v1 contains API Schema definitions for the source v1 API group

Resource Types: -

GitRepository +

Bucket

-

GitRepository is the Schema for the gitrepositories API.

+

Bucket is the Schema for the buckets API.

@@ -38,7 +46,7 @@ string string @@ -59,8 +67,8 @@ Refer to the Kubernetes API documentation for the fields of the @@ -70,91 +78,147 @@ GitRepositorySpec
-GitRepository +Bucket
spec
- -GitRepositorySpec + +BucketSpec
+ + + + + + + + + + + + + + + + @@ -169,62 +233,63 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference
-url
+provider
string
-

URL specifies the Git repository URL, it can be an HTTP/S or SSH address.

+(Optional) +

Provider of the object storage bucket. +Defaults to ‘generic’, which expects an S3 (API) compatible object +storage.

-secretRef
+bucketName
- -github.com/fluxcd/pkg/apis/meta.LocalObjectReference +string + +
+

BucketName is the name of the object storage bucket.

+
+endpoint
+ +string + +
+

Endpoint is the object storage address the BucketName is located at.

+
+sts
+ + +BucketSTSSpec
(Optional) -

SecretRef specifies the Secret containing authentication credentials for -the GitRepository. -For HTTPS repositories the Secret must contain ‘username’ and ‘password’ -fields for basic auth or ‘bearerToken’ field for token auth. -For SSH repositories the Secret must contain ‘identity’ -and ‘known_hosts’ fields.

+

STS specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a +Bucket provider.

+

This field is only supported for the aws and generic providers.

-interval
+insecure
- -Kubernetes meta/v1.Duration - +bool
-

Interval at which the GitRepository URL is checked for updates. -This interval is approximate and may be subject to jitter to ensure -efficient use of resources.

+(Optional) +

Insecure allows connecting to a non-TLS HTTP Endpoint.

-timeout
+region
- -Kubernetes meta/v1.Duration - +string
(Optional) -

Timeout for Git operations like cloning, defaults to 60s.

+

Region of the Endpoint where the BucketName is located in.

-ref
+prefix
- -GitRepositoryRef +string + +
+(Optional) +

Prefix to use for server-side filtering of files in the Bucket.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference
(Optional) -

Reference specifies the Git reference to resolve and monitor for -changes, defaults to the ‘master’ branch.

+

SecretRef specifies the Secret containing authentication credentials +for the Bucket.

-verify
+serviceAccountName
- -GitRepositoryVerification +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the bucket. This field is only supported for the ‘gcp’ and ‘aws’ providers. +For more information about workload identity: +https://fluxcd.io/flux/components/source/buckets/#workload-identity

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference
(Optional) -

Verification specifies the configuration to verify the Git commit -signature(s).

+

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +bucket. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the generic provider.

(Optional)

ProxySecretRef specifies the Secret containing the proxy configuration -to use while communicating with the Git server.

+to use while communicating with the Bucket server.

-ignore
+interval
-string + +Kubernetes meta/v1.Duration +
-(Optional) -

Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.

+

Interval at which the Bucket Endpoint is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

-suspend
+timeout
-bool + +Kubernetes meta/v1.Duration +
(Optional) -

Suspend tells the controller to suspend the reconciliation of this -GitRepository.

+

Timeout for fetch operations, defaults to 60s.

-recurseSubmodules
+ignore
-bool +string
(Optional) -

RecurseSubmodules enables the initialization of all submodules within -the GitRepository as cloned from the URL, using their default settings.

+

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

-include
+suspend
- -[]GitRepositoryInclude - +bool
(Optional) -

Include specifies a list of GitRepository resources which Artifacts -should be included in the Artifact produced for this GitRepository.

+

Suspend tells the controller to suspend the reconciliation of this +Bucket.

@@ -234,8 +299,8 @@ should be included in the Artifact produced for this GitRepository.

status
- -GitRepositoryStatus + +BucketStatus @@ -246,13 +311,9 @@ GitRepositoryStatus
-

Artifact +

GitRepository

-

-(Appears on: -GitRepositoryStatus) -

-

Artifact represents the output of a Source reconciliation.

+

GitRepository is the Schema for the gitrepositories API.

@@ -265,118 +326,166 @@ GitRepositoryStatus + + + + + + + + + + + + + + + +
-path
- +apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
string +
+GitRepository +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta +
-

Path is the relative file path of the Artifact. It can be used to locate -the file in the root of the Artifact storage on the local file system of -the controller managing the Source.

+Refer to the Kubernetes API documentation for the fields of the +metadata field.
+spec
+ + +GitRepositorySpec + + +
+
+
+ + + + + + + - -
url
string
-

URL is the HTTP address of the Artifact as exposed by the controller -managing the Source. It can be used to retrieve the Artifact for -consumption, e.g. by another controller applying the Artifact contents.

+

URL specifies the Git repository URL, it can be an HTTP/S or SSH address.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials for +the GitRepository. +For HTTPS repositories the Secret must contain ‘username’ and ‘password’ +fields for basic auth or ‘bearerToken’ field for token auth. +For SSH repositories the Secret must contain ‘identity’ +and ‘known_hosts’ fields.

-revision
+provider
string
-

Revision is a human-readable identifier traceable in the origin source -system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.

+(Optional) +

Provider used for authentication, can be ‘azure’, ‘github’, ‘generic’. +When not specified, defaults to ‘generic’.

-digest
+serviceAccountName
string
(Optional) -

Digest is the digest of the file in the form of ‘:’.

+

ServiceAccountName is the name of the Kubernetes ServiceAccount used to +authenticate to the GitRepository. This field is only supported for ‘azure’ provider.

-lastUpdateTime
+interval
- -Kubernetes meta/v1.Time + +Kubernetes meta/v1.Duration
-

LastUpdateTime is the timestamp corresponding to the last update of the -Artifact.

+

Interval at which the GitRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

-size
+timeout
-int64 + +Kubernetes meta/v1.Duration +
(Optional) -

Size is the number of bytes in the file.

+

Timeout for Git operations like cloning, defaults to 60s.

-metadata
+ref
-map[string]string + +GitRepositoryRef +
(Optional) -

Metadata holds upstream information such as OCI annotations.

+

Reference specifies the Git reference to resolve and monitor for +changes, defaults to the ‘master’ branch.

- - -

GitRepositoryInclude -

-

-(Appears on: -GitRepositorySpec, -GitRepositoryStatus) -

-

GitRepositoryInclude specifies a local reference to a GitRepository which -Artifact (sub-)contents must be included, and where they should be placed.

-
-
- - - - + + - - + + + + + + + + + + + + + + + + + + +
FieldDescription +verify
+ + +GitRepositoryVerification + + +
+(Optional) +

Verification specifies the configuration to verify the Git commit +signature(s).

+
-repository
+proxySecretRef
github.com/fluxcd/pkg/apis/meta.LocalObjectReference @@ -384,47 +493,2704 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference
-

GitRepositoryRef specifies the GitRepository which Artifact contents -must be included.

+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Git server.

-fromPath
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +GitRepository.

+
+recurseSubmodules
+ +bool + +
+(Optional) +

RecurseSubmodules enables the initialization of all submodules within +the GitRepository as cloned from the URL, using their default settings.

+
+include
+ + +[]GitRepositoryInclude + + +
+(Optional) +

Include specifies a list of GitRepository resources which Artifacts +should be included in the Artifact produced for this GitRepository.

+
+sparseCheckout
+ +[]string + +
+(Optional) +

SparseCheckout specifies a list of directories to checkout when cloning +the repository. If specified, only these directories are included in the +Artifact produced for this GitRepository.

+
+
+status
+ + +GitRepositoryStatus + + +
+
+
+
+

HelmChart +

+

HelmChart is the Schema for the helmcharts API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
+string +
+HelmChart +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +HelmChartSpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+chart
+ +string + +
+

Chart is the name or path the Helm chart is available at in the +SourceRef.

+
+version
+ +string + +
+(Optional) +

Version is the chart version semver expression, ignored for charts from +GitRepository and Bucket sources. Defaults to latest when omitted.

+
+sourceRef
+ + +LocalHelmChartSourceReference + + +
+

SourceRef is the reference to the Source the chart is available at.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the HelmChart SourceRef is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+reconcileStrategy
+ +string + +
+(Optional) +

ReconcileStrategy determines what enables the creation of a new artifact. +Valid values are (‘ChartVersion’, ‘Revision’). +See the documentation of the values for an explanation on their behavior. +Defaults to ChartVersion when omitted.

+
+valuesFiles
+ +[]string + +
+(Optional) +

ValuesFiles is an alternative list of values files to use as the chart +values (values.yaml is not included by default), expected to be a +relative path in the SourceRef. +Values files are merged in the order of this list with the last file +overriding the first. Ignored when omitted.

+
+ignoreMissingValuesFiles
+ +bool + +
+(Optional) +

IgnoreMissingValuesFiles controls whether to silently ignore missing values +files rather than failing.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +source.

+
+verify
+ + +OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic. +This field is only supported when using HelmRepository source with spec.type ‘oci’. +Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.

+
+
+status
+ + +HelmChartStatus + + +
+
+
+
+

HelmRepository +

+

HelmRepository is the Schema for the helmrepositories API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
+string +
+HelmRepository +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +HelmRepositorySpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+url
+ +string + +
+

URL of the Helm repository, a valid URL contains at least a protocol and +host.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the HelmRepository. +For HTTP/S basic auth the secret must contain ‘username’ and ‘password’ +fields. +Support for TLS auth using the ‘certFile’ and ‘keyFile’, and/or ‘caFile’ +keys is deprecated. Please use .spec.certSecretRef instead.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

It takes precedence over the values specified in the Secret referred +to by .spec.secretRef.

+
+passCredentials
+ +bool + +
+(Optional) +

PassCredentials allows the credentials from the SecretRef to be passed +on to a host that does not match the host as defined in URL. +This may be required if the host of the advertised chart URLs in the +index differ from the defined URL. +Enabling this should be done with caution, as it can potentially result +in credentials getting stolen in a MITM-attack.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Interval at which the HelmRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry. +This field is only taken into account if the .spec.type field is set to ‘oci’.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout is used for the index fetch operation for an HTTPS helm repository, +and for remote OCI Repository operations like pulling for an OCI helm +chart by the associated HelmChart. +Its default value is 60s.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +HelmRepository.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+type
+ +string + +
+(Optional) +

Type of the HelmRepository. +When this field is set to “oci”, the URL field value must be prefixed with “oci://”.

+
+provider
+ +string + +
+(Optional) +

Provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +This field is optional, and only taken into account if the .spec.type field is set to ‘oci’. +When not specified, defaults to ‘generic’.

+
+
+status
+ + +HelmRepositoryStatus + + +
+
+
+
+

OCIRepository +

+

OCIRepository is the Schema for the ocirepositories API

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
+string +
+OCIRepository +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +OCIRepositorySpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+url
+ +string + +
+

URL is a reference to an OCI artifact repository hosted +on a remote container registry.

+
+ref
+ + +OCIRepositoryRef + + +
+(Optional) +

The OCI reference to pull and monitor for changes, +defaults to the latest tag.

+
+layerSelector
+ + +OCILayerSelector + + +
+(Optional) +

LayerSelector specifies which layer should be extracted from the OCI artifact. +When not specified, the first layer found in the artifact is selected.

+
+provider
+ +string + +
+(Optional) +

The provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +When not specified, defaults to ‘generic’.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef contains the secret name containing the registry login +credentials to resolve image metadata. +The secret must be of type kubernetes.io/dockerconfigjson.

+
+verify
+ + +OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the image pull if the service account has attached pull secrets. For more information: +https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the container registry.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the OCIRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

The timeout for remote OCI Repository operations like pulling, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry.

+
+suspend
+ +bool + +
+(Optional) +

This flag tells the controller to suspend the reconciliation of this source.

+
+
+status
+ + +OCIRepositoryStatus + + +
+
+
+
+

BucketSTSSpec +

+

+(Appears on: +BucketSpec) +

+

BucketSTSSpec specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a Bucket +provider.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+

Provider of the Security Token Service.

+
+endpoint
+ +string + +
+

Endpoint is the HTTP/S endpoint of the Security Token Service from +where temporary credentials will be fetched.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the STS endpoint. This Secret must contain the fields username +and password and is supported only for the ldap provider.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +STS endpoint. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the ldap provider.

+
+
+
+

BucketSpec +

+

+(Appears on: +Bucket) +

+

BucketSpec specifies the required configuration to produce an Artifact for +an object storage bucket.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+(Optional) +

Provider of the object storage bucket. +Defaults to ‘generic’, which expects an S3 (API) compatible object +storage.

+
+bucketName
+ +string + +
+

BucketName is the name of the object storage bucket.

+
+endpoint
+ +string + +
+

Endpoint is the object storage address the BucketName is located at.

+
+sts
+ + +BucketSTSSpec + + +
+(Optional) +

STS specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a +Bucket provider.

+

This field is only supported for the aws and generic providers.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP Endpoint.

+
+region
+ +string + +
+(Optional) +

Region of the Endpoint where the BucketName is located in.

+
+prefix
+ +string + +
+(Optional) +

Prefix to use for server-side filtering of files in the Bucket.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the Bucket.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the bucket. This field is only supported for the ‘gcp’ and ‘aws’ providers. +For more information about workload identity: +https://fluxcd.io/flux/components/source/buckets/#workload-identity

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +bucket. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the generic provider.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Bucket server.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the Bucket Endpoint is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for fetch operations, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +Bucket.

+
+
+
+

BucketStatus +

+

+(Appears on: +Bucket) +

+

BucketStatus records the observed state of a Bucket.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the Bucket object.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the Bucket.

+
+url
+ +string + +
+(Optional) +

URL is the dynamic fetch link for the latest Artifact. +It is provided on a “best effort” basis, and using the precise +BucketStatus.Artifact data is recommended.

+
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the last successful Bucket reconciliation.

+
+observedIgnore
+ +string + +
+(Optional) +

ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

ExternalArtifact +

+

ExternalArtifact is the Schema for the external artifacts API

+
+
+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +ExternalArtifactSpec + + +
+
+
+ + + + + +
+sourceRef
+ + +github.com/fluxcd/pkg/apis/meta.NamespacedObjectKindReference + + +
+(Optional) +

SourceRef points to the Kubernetes custom resource for +which the artifact is generated.

+
+
+status
+ + +ExternalArtifactStatus + + +
+
+
+
+

ExternalArtifactSpec +

+

+(Appears on: +ExternalArtifact) +

+

ExternalArtifactSpec defines the desired state of ExternalArtifact

+
+
+ + + + + + + + + + + + + +
FieldDescription
+sourceRef
+ + +github.com/fluxcd/pkg/apis/meta.NamespacedObjectKindReference + + +
+(Optional) +

SourceRef points to the Kubernetes custom resource for +which the artifact is generated.

+
+
+
+

ExternalArtifactStatus +

+

+(Appears on: +ExternalArtifact) +

+

ExternalArtifactStatus defines the observed state of ExternalArtifact

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the output of an ExternalArtifact reconciliation.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the ExternalArtifact.

+
+
+
+

GitRepositoryInclude +

+

+(Appears on: +GitRepositorySpec, +GitRepositoryStatus) +

+

GitRepositoryInclude specifies a local reference to a GitRepository which +Artifact (sub-)contents must be included, and where they should be placed.

+
+
+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+repository
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+

GitRepositoryRef specifies the GitRepository which Artifact contents +must be included.

+
+fromPath
+ +string + +
+(Optional) +

FromPath specifies the path to copy contents from, defaults to the root +of the Artifact.

+
+toPath
+ +string + +
+(Optional) +

ToPath specifies the path to copy contents to, defaults to the name of +the GitRepositoryRef.

+
+
+
+

GitRepositoryRef +

+

+(Appears on: +GitRepositorySpec) +

+

GitRepositoryRef specifies the Git reference to resolve and checkout.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+branch
+ +string + +
+(Optional) +

Branch to check out, defaults to ‘master’ if no other field is defined.

+
+tag
+ +string + +
+(Optional) +

Tag to check out, takes precedence over Branch.

+
+semver
+ +string + +
+(Optional) +

SemVer tag expression to check out, takes precedence over Tag.

+
+name
+ +string + +
+(Optional) +

Name of the reference to check out; takes precedence over Branch, Tag and SemVer.

+

It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description +Examples: “refs/heads/main”, “refs/tags/v0.1.0”, “refs/pull/420/head”, “refs/merge-requests/1/head”

+
+commit
+ +string + +
+(Optional) +

Commit SHA to check out, takes precedence over all reference fields.

+

This can be combined with Branch to shallow clone the branch, in which +the commit is expected to exist.

+
+
+
+

GitRepositorySpec +

+

+(Appears on: +GitRepository) +

+

GitRepositorySpec specifies the required configuration to produce an +Artifact for a Git repository.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+url
+ +string + +
+

URL specifies the Git repository URL, it can be an HTTP/S or SSH address.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials for +the GitRepository. +For HTTPS repositories the Secret must contain ‘username’ and ‘password’ +fields for basic auth or ‘bearerToken’ field for token auth. +For SSH repositories the Secret must contain ‘identity’ +and ‘known_hosts’ fields.

+
+provider
+ +string + +
+(Optional) +

Provider used for authentication, can be ‘azure’, ‘github’, ‘generic’. +When not specified, defaults to ‘generic’.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to +authenticate to the GitRepository. This field is only supported for ‘azure’ provider.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the GitRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for Git operations like cloning, defaults to 60s.

+
+ref
+ + +GitRepositoryRef + + +
+(Optional) +

Reference specifies the Git reference to resolve and monitor for +changes, defaults to the ‘master’ branch.

+
+verify
+ + +GitRepositoryVerification + + +
+(Optional) +

Verification specifies the configuration to verify the Git commit +signature(s).

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Git server.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +GitRepository.

+
+recurseSubmodules
+ +bool + +
+(Optional) +

RecurseSubmodules enables the initialization of all submodules within +the GitRepository as cloned from the URL, using their default settings.

+
+include
+ + +[]GitRepositoryInclude + + +
+(Optional) +

Include specifies a list of GitRepository resources which Artifacts +should be included in the Artifact produced for this GitRepository.

+
+sparseCheckout
+ +[]string + +
+(Optional) +

SparseCheckout specifies a list of directories to checkout when cloning +the repository. If specified, only these directories are included in the +Artifact produced for this GitRepository.

+
+
+
+

GitRepositoryStatus +

+

+(Appears on: +GitRepository) +

+

GitRepositoryStatus records the observed state of a Git repository.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the GitRepository +object.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the GitRepository.

+
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the last successful GitRepository reconciliation.

+
+includedArtifacts
+ + +[]github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

IncludedArtifacts contains a list of the last successfully included +Artifacts as instructed by GitRepositorySpec.Include.

+
+observedIgnore
+ +string + +
+(Optional) +

ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

+
+observedRecurseSubmodules
+ +bool + +
+(Optional) +

ObservedRecurseSubmodules is the observed resource submodules +configuration used to produce the current Artifact.

+
+observedInclude
+ + +[]GitRepositoryInclude + + +
+(Optional) +

ObservedInclude is the observed list of GitRepository resources used to +produce the current Artifact.

+
+observedSparseCheckout
+ +[]string + +
+(Optional) +

ObservedSparseCheckout is the observed list of directories used to +produce the current Artifact.

+
+sourceVerificationMode
+ + +GitVerificationMode + + +
+(Optional) +

SourceVerificationMode is the last used verification mode indicating +which Git object(s) have been verified.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

GitRepositoryVerification +

+

+(Appears on: +GitRepositorySpec) +

+

GitRepositoryVerification specifies the Git commit signature verification +strategy.

+
+
+ + + + + + + + + + + + + + + + + +
FieldDescription
+mode
+ + +GitVerificationMode + + +
+(Optional) +

Mode specifies which Git object(s) should be verified.

+

The variants “head” and “HEAD” both imply the same thing, i.e. verify +the commit that the HEAD of the Git repository points to. The variant +“head” solely exists to ensure backwards compatibility.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+

SecretRef specifies the Secret containing the public keys of trusted Git +authors.

+
+
+
+

GitVerificationMode +(string alias)

+

+(Appears on: +GitRepositoryStatus, +GitRepositoryVerification) +

+

GitVerificationMode specifies the verification mode for a Git repository.

+

HelmChartSpec +

+

+(Appears on: +HelmChart) +

+

HelmChartSpec specifies the desired state of a Helm chart.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+chart
+ +string + +
+

Chart is the name or path the Helm chart is available at in the +SourceRef.

+
+version
+ +string + +
+(Optional) +

Version is the chart version semver expression, ignored for charts from +GitRepository and Bucket sources. Defaults to latest when omitted.

+
+sourceRef
+ + +LocalHelmChartSourceReference + + +
+

SourceRef is the reference to the Source the chart is available at.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the HelmChart SourceRef is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+reconcileStrategy
+ +string + +
+(Optional) +

ReconcileStrategy determines what enables the creation of a new artifact. +Valid values are (‘ChartVersion’, ‘Revision’). +See the documentation of the values for an explanation on their behavior. +Defaults to ChartVersion when omitted.

+
+valuesFiles
+ +[]string + +
+(Optional) +

ValuesFiles is an alternative list of values files to use as the chart +values (values.yaml is not included by default), expected to be a +relative path in the SourceRef. +Values files are merged in the order of this list with the last file +overriding the first. Ignored when omitted.

+
+ignoreMissingValuesFiles
+ +bool + +
+(Optional) +

IgnoreMissingValuesFiles controls whether to silently ignore missing values +files rather than failing.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +source.

+
+verify
+ + +OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic. +This field is only supported when using HelmRepository source with spec.type ‘oci’. +Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.

+
+
+
+

HelmChartStatus +

+

+(Appears on: +HelmChart) +

+

HelmChartStatus records the observed state of the HelmChart.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the HelmChart +object.

+
+observedSourceArtifactRevision
+ +string + +
+(Optional) +

ObservedSourceArtifactRevision is the last observed Artifact.Revision +of the HelmChartSpec.SourceRef.

+
+observedChartName
+ +string + +
+(Optional) +

ObservedChartName is the last observed chart name as specified by the +resolved chart reference.

+
+observedValuesFiles
+ +[]string + +
+(Optional) +

ObservedValuesFiles are the observed value files of the last successful +reconciliation. +It matches the chart in the last successfully reconciled artifact.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the HelmChart.

+
+url
+ +string + +
+(Optional) +

URL is the dynamic fetch link for the latest Artifact. +It is provided on a “best effort” basis, and using the precise +BucketStatus.Artifact data is recommended.

+
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the output of the last successful reconciliation.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

HelmRepositorySpec +

+

+(Appears on: +HelmRepository) +

+

HelmRepositorySpec specifies the required configuration to produce an +Artifact for a Helm repository index YAML.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+url
+ +string + +
+

URL of the Helm repository, a valid URL contains at least a protocol and +host.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the HelmRepository. +For HTTP/S basic auth the secret must contain ‘username’ and ‘password’ +fields. +Support for TLS auth using the ‘certFile’ and ‘keyFile’, and/or ‘caFile’ +keys is deprecated. Please use .spec.certSecretRef instead.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

It takes precedence over the values specified in the Secret referred +to by .spec.secretRef.

+
+passCredentials
+ +bool + +
+(Optional) +

PassCredentials allows the credentials from the SecretRef to be passed +on to a host that does not match the host as defined in URL. +This may be required if the host of the advertised chart URLs in the +index differ from the defined URL. +Enabling this should be done with caution, as it can potentially result +in credentials getting stolen in a MITM-attack.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Interval at which the HelmRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP container registry. +This field is only taken into account if the .spec.type field is set to ‘oci’.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout is used for the index fetch operation for an HTTPS helm repository, +and for remote OCI Repository operations like pulling for an OCI helm +chart by the associated HelmChart. +Its default value is 60s.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +HelmRepository.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+type
+ +string + +
+(Optional) +

Type of the HelmRepository. +When this field is set to “oci”, the URL field value must be prefixed with “oci://”.

+
+provider
+ +string + +
+(Optional) +

Provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +This field is optional, and only taken into account if the .spec.type field is set to ‘oci’. +When not specified, defaults to ‘generic’.

+
+
+
+

HelmRepositoryStatus +

+

+(Appears on: +HelmRepository) +

+

HelmRepositoryStatus records the observed state of the HelmRepository.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the HelmRepository +object.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the HelmRepository.

+
+url
+ +string + +
+(Optional) +

URL is the dynamic fetch link for the latest Artifact. +It is provided on a “best effort” basis, and using the precise +HelmRepositoryStatus.Artifact data is recommended.

+
+artifact
+ + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
+(Optional) +

Artifact represents the last successful HelmRepository reconciliation.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

LocalHelmChartSourceReference +

+

+(Appears on: +HelmChartSpec) +

+

LocalHelmChartSourceReference contains enough information to let you locate +the typed referenced object at namespace level.

+
+
+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+ +string + +
+(Optional) +

APIVersion of the referent.

+
+kind
+ +string + +
+

Kind of the referent, valid values are (‘HelmRepository’, ‘GitRepository’, +‘Bucket’).

+
+name
+ +string + +
+

Name of the referent.

+
+
+
+

OCILayerSelector +

+

+(Appears on: +OCIRepositorySpec, +OCIRepositoryStatus) +

+

OCILayerSelector specifies which layer should be extracted from an OCI Artifact

+
+
+ + + + + + + + + +
FieldDescription
+mediaType
string
(Optional) -

FromPath specifies the path to copy contents from, defaults to the root -of the Artifact.

+

MediaType specifies the OCI media type of the layer +which should be extracted from the OCI Artifact. The +first layer matching this type is selected.

-toPath
+operation
string
(Optional) -

ToPath specifies the path to copy contents to, defaults to the name of -the GitRepositoryRef.

+

Operation specifies how the selected layer should be processed. +By default, the layer compressed content is extracted to storage. +When the operation is set to ‘copy’, the layer compressed content +is persisted to storage as it is.

-

GitRepositoryRef +

OCIRepositoryRef

(Appears on: -GitRepositorySpec) +OCIRepositorySpec)

-

GitRepositoryRef specifies the Git reference to resolve and checkout.

+

OCIRepositoryRef defines the image reference for the OCIRepository’s URL

@@ -437,26 +3203,15 @@ the GitRepositoryRef.

- - - - @@ -468,49 +3223,45 @@ string
-branch
- -string - -
-(Optional) -

Branch to check out, defaults to ‘master’ if no other field is defined.

-
-tag
+digest
string
(Optional) -

Tag to check out, takes precedence over Branch.

+

Digest is the image digest to pull, takes precedence over SemVer. +The value should be in the format ‘sha256:’.

(Optional) -

SemVer tag expression to check out, takes precedence over Tag.

+

SemVer is the range of tags to pull selecting the latest within +the range, takes precedence over Tag.

-name
+semverFilter
string
(Optional) -

Name of the reference to check out; takes precedence over Branch, Tag and SemVer.

-

It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description -Examples: “refs/heads/main”, “refs/tags/v0.1.0”, “refs/pull/420/head”, “refs/merge-requests/1/head”

+

SemverFilter is a regex pattern to filter the tags within the SemVer range.

-commit
+tag
string
(Optional) -

Commit SHA to check out, takes precedence over all reference fields.

-

This can be combined with Branch to shallow clone the branch, in which -the commit is expected to exist.

+

Tag is the image tag to pull, defaults to latest.

-

GitRepositorySpec +

OCIRepositorySpec

(Appears on: -GitRepository) +OCIRepository)

-

GitRepositorySpec specifies the required configuration to produce an -Artifact for a Git repository.

+

OCIRepositorySpec defines the desired state of OCIRepository

@@ -529,85 +3280,122 @@ string + + + + + + + + @@ -622,75 +3410,87 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + +
-

URL specifies the Git repository URL, it can be an HTTP/S or SSH address.

+

URL is a reference to an OCI artifact repository hosted +on a remote container registry.

-secretRef
+ref
- -github.com/fluxcd/pkg/apis/meta.LocalObjectReference + +OCIRepositoryRef
(Optional) -

SecretRef specifies the Secret containing authentication credentials for -the GitRepository. -For HTTPS repositories the Secret must contain ‘username’ and ‘password’ -fields for basic auth or ‘bearerToken’ field for token auth. -For SSH repositories the Secret must contain ‘identity’ -and ‘known_hosts’ fields.

+

The OCI reference to pull and monitor for changes, +defaults to the latest tag.

-interval
+layerSelector
- -Kubernetes meta/v1.Duration + +OCILayerSelector
-

Interval at which the GitRepository URL is checked for updates. -This interval is approximate and may be subject to jitter to ensure -efficient use of resources.

+(Optional) +

LayerSelector specifies which layer should be extracted from the OCI artifact. +When not specified, the first layer found in the artifact is selected.

-timeout
+provider
- -Kubernetes meta/v1.Duration - +string
(Optional) -

Timeout for Git operations like cloning, defaults to 60s.

+

The provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +When not specified, defaults to ‘generic’.

-ref
+secretRef
- -GitRepositoryRef + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference
(Optional) -

Reference specifies the Git reference to resolve and monitor for -changes, defaults to the ‘master’ branch.

+

SecretRef contains the secret name containing the registry login +credentials to resolve image metadata. +The secret must be of type kubernetes.io/dockerconfigjson.

verify
- -GitRepositoryVerification + +OCIRepositoryVerification
(Optional) -

Verification specifies the configuration to verify the Git commit -signature(s).

+

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic.

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the image pull if the service account has attached pull secrets. For more information: +https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

(Optional)

ProxySecretRef specifies the Secret containing the proxy configuration -to use while communicating with the Git server.

+to use while communicating with the container registry.

-ignore
+interval
-string + +Kubernetes meta/v1.Duration + + +
+

Interval at which the OCIRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration +
(Optional) -

Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.

+

The timeout for remote OCI Repository operations like pulling, defaults to 60s.

-suspend
+ignore
-bool +string
(Optional) -

Suspend tells the controller to suspend the reconciliation of this -GitRepository.

+

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

-recurseSubmodules
+insecure
bool
(Optional) -

RecurseSubmodules enables the initialization of all submodules within -the GitRepository as cloned from the URL, using their default settings.

+

Insecure allows connecting to a non-TLS HTTP container registry.

-include
+suspend
- -[]GitRepositoryInclude - +bool
(Optional) -

Include specifies a list of GitRepository resources which Artifacts -should be included in the Artifact produced for this GitRepository.

+

This flag tells the controller to suspend the reconciliation of this source.

-

GitRepositoryStatus +

OCIRepositoryStatus

(Appears on: -GitRepository) +OCIRepository)

-

GitRepositoryStatus records the observed state of a Git repository.

+

OCIRepositoryStatus defines the observed state of OCIRepository

@@ -710,8 +3510,7 @@ int64 @@ -725,36 +3524,33 @@ object.

@@ -772,74 +3568,111 @@ the source artifact.

+ + +
(Optional) -

ObservedGeneration is the last observed generation of the GitRepository -object.

+

ObservedGeneration is the last observed generation.

(Optional) -

Conditions holds the conditions for the GitRepository.

+

Conditions holds the conditions for the OCIRepository.

-artifact
+url
- -Artifact - +string
(Optional) -

Artifact represents the last successful GitRepository reconciliation.

+

URL is the download link for the artifact output of the last OCI Repository sync.

-includedArtifacts
+artifact
- -[]Artifact + +github.com/fluxcd/pkg/apis/meta.Artifact
(Optional) -

IncludedArtifacts contains a list of the last successfully included -Artifacts as instructed by GitRepositorySpec.Include.

+

Artifact represents the output of the last successful OCI Repository sync.

-observedRecurseSubmodules
+observedLayerSelector
-bool + +OCILayerSelector +
(Optional) -

ObservedRecurseSubmodules is the observed resource submodules -configuration used to produce the current Artifact.

+

ObservedLayerSelector is the observed layer selector used for constructing +the source artifact.

-observedInclude
+ReconcileRequestStatus
- -[]GitRepositoryInclude + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
-(Optional) -

ObservedInclude is the observed list of GitRepository resources used to -produce the current Artifact.

+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+
+

OCIRepositoryVerification +

+

+(Appears on: +HelmChartSpec, +OCIRepositorySpec) +

+

OCIRepositoryVerification verifies the authenticity of an OCI Artifact

+
+
+ + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+

Provider specifies the technology used to sign the OCI Artifact.

-sourceVerificationMode
+secretRef
- -GitVerificationMode + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference
(Optional) -

SourceVerificationMode is the last used verification mode indicating -which Git object(s) have been verified.

+

SecretRef specifies the Kubernetes Secret containing the +trusted public keys.

-ReconcileRequestStatus
+matchOIDCIdentity
- -github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + +[]OIDCIdentityMatch
-

-(Members of ReconcileRequestStatus are embedded into this type.) -

+(Optional) +

MatchOIDCIdentity specifies the identity matching criteria to use +while verifying an OCI artifact which was signed using Cosign keyless +signing. The artifact’s identity is deemed to be verified if any of the +specified matchers match against the identity.

-

GitRepositoryVerification +

OIDCIdentityMatch

(Appears on: -GitRepositorySpec) +OCIRepositoryVerification)

-

GitRepositoryVerification specifies the Git commit signature verification -strategy.

+

OIDCIdentityMatch specifies options for verifying the certificate identity, +i.e. the issuer and the subject of the certificate.

@@ -852,47 +3685,34 @@ strategy.

-mode
+issuer
- -GitVerificationMode - +string
-(Optional) -

Mode specifies which Git object(s) should be verified.

-

The variants “head” and “HEAD” both imply the same thing, i.e. verify -the commit that the HEAD of the Git repository points to. The variant -“head” solely exists to ensure backwards compatibility.

+

Issuer specifies the regex pattern to match against to verify +the OIDC issuer in the Fulcio certificate. The pattern must be a +valid Go regular expression.

-secretRef
+subject
- -github.com/fluxcd/pkg/apis/meta.LocalObjectReference - +string
-

SecretRef specifies the Secret containing the public keys of trusted Git -authors.

+

Subject specifies the regex pattern to match against to verify +the identity subject in the Fulcio certificate. The pattern must +be a valid Go regular expression.

-

GitVerificationMode -(string alias)

-

-(Appears on: -GitRepositoryStatus, -GitRepositoryVerification) -

-

GitVerificationMode specifies the verification mode for a Git repository.

Source

Source interface must be supported by all API types. diff --git a/docs/api/v1beta2/source.md b/docs/api/v1beta2/source.md index 04c3e328f..8234f7014 100644 --- a/docs/api/v1beta2/source.md +++ b/docs/api/v1beta2/source.md @@ -114,6 +114,23 @@ string +sts
+ + +BucketSTSSpec + + + + +(Optional) +

STS specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a +Bucket provider.

+

This field is only supported for the aws and generic providers.

+ + + + insecure
bool @@ -165,6 +182,47 @@ for the Bucket.

+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + + +(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +bucket. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the generic provider.

+ + + + +proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + + +(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Bucket server.

+ + + + interval
@@ -660,6 +718,19 @@ is merged before the ValuesFiles items. Ignored when omitted.

+ignoreMissingValuesFiles
+ +bool + + + +(Optional) +

IgnoreMissingValuesFiles controls whether to silently ignore missing values +files rather than failing.

+ + + + suspend
bool @@ -691,8 +762,8 @@ NOTE: Not implemented, provisional as of
-OCIRepositoryVerification + +github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification @@ -1109,8 +1180,8 @@ The secret must be of type kubernetes.io/dockerconfigjson.

verify
- -OCIRepositoryVerification + +github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification @@ -1164,6 +1235,21 @@ been deprecated.

+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + + +(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the container registry.

+ + + + interval
@@ -1369,6 +1455,94 @@ map[string]string +

BucketSTSSpec +

+

+(Appears on: +BucketSpec) +

+

BucketSTSSpec specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a Bucket +provider.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+

Provider of the Security Token Service.

+
+endpoint
+ +string + +
+

Endpoint is the HTTP/S endpoint of the Security Token Service from +where temporary credentials will be fetched.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the STS endpoint. This Secret must contain the fields username +and password and is supported only for the ldap provider.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +STS endpoint. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the ldap provider.

+
+
+

BucketSpec

@@ -1425,6 +1599,23 @@ string +sts
+ + +BucketSTSSpec + + + + +(Optional) +

STS specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a +Bucket provider.

+

This field is only supported for the aws and generic providers.

+ + + + insecure
bool @@ -1476,6 +1667,47 @@ for the Bucket.

+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + + +(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +bucket. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the generic provider.

+ + + + +proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + + +(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Bucket server.

+ + + + interval
@@ -2329,6 +2561,19 @@ is merged before the ValuesFiles items. Ignored when omitted.

+ignoreMissingValuesFiles
+ +bool + + + +(Optional) +

IgnoreMissingValuesFiles controls whether to silently ignore missing values +files rather than failing.

+ + + + suspend
bool @@ -2360,8 +2605,8 @@ NOTE: Not implemented, provisional as of
-OCIRepositoryVerification + +github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification @@ -2436,6 +2681,20 @@ resolved chart reference.

+observedValuesFiles
+ +[]string + + + +(Optional) +

ObservedValuesFiles are the observed value files of the last successful +reconciliation. +It matches the chart in the last successfully reconciled artifact.

+ + + + conditions
@@ -2938,6 +3197,18 @@ the range, takes precedence over Tag.

+semverFilter
+ +string + + + +(Optional) +

SemverFilter is a regex pattern to filter the tags within the SemVer range.

+ + + + tag
string @@ -3044,8 +3315,8 @@ The secret must be of type kubernetes.io/dockerconfigjson.

verify
-
-OCIRepositoryVerification + +github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification @@ -3099,6 +3370,21 @@ been deprecated.

+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + + +(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the container registry.

+ + + + interval
@@ -3305,119 +3591,6 @@ github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus -

OCIRepositoryVerification -

-

-(Appears on: -HelmChartSpec, -OCIRepositorySpec) -

-

OCIRepositoryVerification verifies the authenticity of an OCI Artifact

-
-
- - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-provider
- -string - -
-

Provider specifies the technology used to sign the OCI Artifact.

-
-secretRef
- - -github.com/fluxcd/pkg/apis/meta.LocalObjectReference - - -
-(Optional) -

SecretRef specifies the Kubernetes Secret containing the -trusted public keys.

-
-matchOIDCIdentity
- - -[]OIDCIdentityMatch - - -
-(Optional) -

MatchOIDCIdentity specifies the identity matching criteria to use -while verifying an OCI artifact which was signed using Cosign keyless -signing. The artifact’s identity is deemed to be verified if any of the -specified matchers match against the identity.

-
-
-
-

OIDCIdentityMatch -

-

-(Appears on: -OCIRepositoryVerification) -

-

OIDCIdentityMatch specifies options for verifying the certificate identity, -i.e. the issuer and the subject of the certificate.

-
-
- - - - - - - - - - - - - - - - - -
FieldDescription
-issuer
- -string - -
-

Issuer specifies the regex pattern to match against to verify -the OIDC issuer in the Fulcio certificate. The pattern must be a -valid Go regular expression.

-
-subject
- -string - -
-

Subject specifies the regex pattern to match against to verify -the identity subject in the Fulcio certificate. The pattern must -be a valid Go regular expression.

-
-
-

Source

Source interface must be supported by all API types. diff --git a/docs/spec/v1/README.md b/docs/spec/v1/README.md index ae989ceb0..f08ea805f 100644 --- a/docs/spec/v1/README.md +++ b/docs/spec/v1/README.md @@ -6,6 +6,10 @@ This is the v1 API specification for defining the desired state sources of Kuber * Source kinds: + [GitRepository](gitrepositories.md) + + [OCIRepository](ocirepositories.md) + + [HelmRepository](helmrepositories.md) + + [HelmChart](helmcharts.md) + + [Bucket](buckets.md) ## Implementation @@ -15,3 +19,4 @@ This is the v1 API specification for defining the desired state sources of Kuber * [kustomize-controller](https://github.com/fluxcd/kustomize-controller/) * [helm-controller](https://github.com/fluxcd/helm-controller/) +* [source-watcher](https://github.com/fluxcd/source-watcher/) diff --git a/docs/spec/v1/buckets.md b/docs/spec/v1/buckets.md new file mode 100644 index 000000000..077ac952b --- /dev/null +++ b/docs/spec/v1/buckets.md @@ -0,0 +1,1433 @@ +# Buckets + + + +The `Bucket` API defines a Source to produce an Artifact for objects from storage +solutions like Amazon S3, Google Cloud Storage buckets, or any other solution +with a S3 compatible API such as Minio, Alibaba Cloud OSS and others. + +## Example + +The following is an example of a Bucket. It creates a tarball (`.tar.gz`) +Artifact with the fetched objects from an object storage with an S3 +compatible API (e.g. 
[Minio](https://min.io)): + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: minio-bucket + namespace: default +spec: + interval: 5m0s + endpoint: minio.example.com + insecure: true + secretRef: + name: minio-bucket-secret + bucketName: example +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-bucket-secret + namespace: default +type: Opaque +stringData: + accesskey: + secretkey: +``` + +In the above example: + +- A Bucket named `minio-bucket` is created, indicated by the + `.metadata.name` field. +- The source-controller checks the object storage bucket every five minutes, + indicated by the `.spec.interval` field. +- It authenticates to the `minio.example.com` endpoint with + the static credentials from the `minio-secret` Secret data, indicated by + the `.spec.endpoint` and `.spec.secretRef.name` fields. +- A list of object keys and their [etags](https://en.wikipedia.org/wiki/HTTP_ETag) + in the `.spec.bucketName` bucket is compiled, while filtering the keys using + [default ignore rules](#default-exclusions). +- The digest (algorithm defaults to SHA256) of the list is used as Artifact + revision, reported in-cluster in the `.status.artifact.revision` field. +- When the current Bucket revision differs from the latest calculated revision, + all objects are fetched and archived. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `bucket.yaml`, and +changing the Bucket and Secret values to target a Minio instance you have +control over. + +**Note:** For more advanced examples targeting e.g. Amazon S3 or GCP, see +[Provider](#provider). + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f bucket.yaml + ``` + +2. 
Run `kubectl get buckets` to see the Bucket: + + ```console + NAME ENDPOINT AGE READY STATUS + minio-bucket minio.example.com 34s True stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + ``` + +3. Run `kubectl describe bucket minio-bucket` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the Bucket's Status: + + ```console + ... + Status: + Artifact: + Digest: sha256:72aa638abb455ca5f9ef4825b949fd2de4d4be0a74895bf7ed2338622cd12686 + Last Update Time: 2024-02-01T23:43:38Z + Path: bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz + Revision: sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + Size: 38099 + URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz + Conditions: + Last Transition Time: 2024-02-01T23:43:38Z + Message: stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2024-02-01T23:43:38Z + Message: stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 82s source-controller stored artifact with 16 fetched files from 'example' bucket + ``` + +## Writing a Bucket spec + +As with all other Kubernetes config, a Bucket needs `apiVersion`, `kind`, and +`metadata` fields. 
The name of a Bucket object must be a valid +[DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A Bucket also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### Provider + +The `.spec.provider` field allows for specifying a Provider to enable provider +specific configurations, for example to communicate with a non-S3 compatible +API endpoint, or to change the authentication method. + +Supported options are: + +- [Generic](#generic) +- [AWS](#aws) +- [Azure](#azure) +- [GCP](#gcp) + +If you do not specify `.spec.provider`, it defaults to `generic`. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +#### Generic + +When a Bucket's `spec.provider` is set to `generic`, the controller will +attempt to communicate with the specified [Endpoint](#endpoint) using the +[Minio Client SDK](https://github.com/minio/minio-go), which can communicate +with any Amazon S3 compatible object storage (including +[GCS](https://cloud.google.com/storage/docs/interoperability), +[Wasabi](https://wasabi-support.zendesk.com/hc/en-us/articles/360002079671-How-do-I-use-Minio-Client-with-Wasabi-), +and many others). + +The `generic` Provider _requires_ a [Secret reference](#secret-reference) to a +Secret with `.data.accesskey` and `.data.secretkey` values, used to +authenticate with static credentials. + +The Provider allows for specifying a region the bucket is in using the +[`.spec.region` field](#region), if required by the [Endpoint](#endpoint). 
+ +##### Generic example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: generic-insecure + namespace: default +spec: + provider: generic + interval: 5m0s + bucketName: podinfo + endpoint: minio.minio.svc.cluster.local:9000 + timeout: 60s + insecure: true + secretRef: + name: minio-credentials +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-credentials + namespace: default +type: Opaque +data: + accesskey: + secretkey: +``` + +#### AWS + +When a Bucket's `.spec.provider` field is set to `aws`, the source-controller +will attempt to communicate with the specified [Endpoint](#endpoint) using the +[Minio Client SDK](https://github.com/minio/minio-go). + +Without a [Secret reference](#secret-reference), authorization using +credentials retrieved from the AWS EC2 service is attempted by default. When +a reference is specified, it expects a Secret with `.data.accesskey` and +`.data.secretkey` values, used to authenticate with static credentials. + +The Provider allows for specifying the +[Amazon AWS Region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions) +using the [`.spec.region` field](#region). + +For detailed setup instructions, see: https://fluxcd.io/flux/integrations/aws/#for-amazon-simple-storage-service + +##### AWS EC2 example + +**Note:** On EKS you have to create an [IAM role](#aws-iam-role-example) for +the source-controller service account that grants access to the bucket. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + timeout: 30s +``` + +##### AWS IAM role example + +Replace `` with the specified `.spec.bucketName`. 
+ +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::/*" + }, + { + "Sid": "", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::" + } + ] +} +``` + +##### AWS static auth example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + secretRef: + name: aws-credentials +--- +apiVersion: v1 +kind: Secret +metadata: + name: aws-credentials + namespace: default +type: Opaque +data: + accesskey: + secretkey: +``` + +##### AWS Controller-Level Workload Identity example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws-controller-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + timeout: 30s +``` + +##### AWS Object-Level Workload Identity example + +**Note:** To use Object-Level Workload Identity (`.spec.serviceAccountName` with +cloud providers), the controller feature gate `ObjectLevelWorkloadIdentity` must +be enabled. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws-object-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + serviceAccountName: aws-workload-identity-sa + timeout: 30s +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: aws-workload-identity-sa + namespace: default + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/flux-bucket-role +``` + +#### Azure + +When a Bucket's `.spec.provider` is set to `azure`, the source-controller will +attempt to communicate with the specified [Endpoint](#endpoint) using the +[Azure Blob Storage SDK for Go](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob). + +Without a [Secret reference](#secret-reference), authentication using a chain +with: + +- [Environment credentials](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential) +- [Workload Identity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential) +- [Managed Identity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential) + with the `AZURE_CLIENT_ID` +- Managed Identity with a system-assigned identity + +is attempted by default. If no chain can be established, the bucket +is assumed to be publicly reachable. + +When a reference is specified, it expects a Secret with one of the following +sets of `.data` fields: + +- `tenantId`, `clientId` and `clientSecret` for authenticating a Service + Principal with a secret. +- `tenantId`, `clientId` and `clientCertificate` (plus optionally + `clientCertificatePassword` and/or `clientCertificateSendChain`) for + authenticating a Service Principal with a certificate. +- `clientId` for authenticating using a Managed Identity. 
+- `accountKey` for authenticating using a + [Shared Key](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob#SharedKeyCredential). +- `sasKey` for authenticating using a [SAS Token](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview) + +For any Managed Identity and/or Microsoft Entra ID (Formerly Azure Active Directory) authentication method, +the base URL can be configured using `.data.authorityHost`. If not supplied, +[`AzurePublicCloud` is assumed](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AuthorityHost). + +##### Azure example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-public + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: podinfo + endpoint: https://podinfoaccount.blob.core.windows.net + timeout: 30s +``` + +##### Azure Service Principal Secret example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-service-principal-secret + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-sp-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-sp-auth + namespace: default +type: Opaque +data: + tenantId: + clientId: + clientSecret: +``` + +##### Azure Service Principal Certificate example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-service-principal-cert + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-sp-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-sp-auth + namespace: default +type: Opaque +data: + tenantId: + clientId: + clientCertificate: + # Plus optionally + clientCertificatePassword: + clientCertificateSendChain: # either "1" or "true" +``` + +##### Azure Managed 
Identity with Client ID example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-managed-identity + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-smi-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-smi-auth + namespace: default +type: Opaque +data: + clientId: +``` + +##### Azure Blob Shared Key example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-shared-key + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-key +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-key + namespace: default +type: Opaque +data: + accountKey: +``` + +##### Workload Identity + +If you have [Workload Identity](https://azure.github.io/azure-workload-identity/docs/installation/managed-clusters.html) +set up on your cluster, you need to create an Azure Identity and give it +access to Azure Blob Storage. + +```shell +export IDENTITY_NAME="blob-access" + +az role assignment create --role "Storage Blob Data Reader" \ +--assignee-object-id "$(az identity show -n $IDENTITY_NAME -o tsv --query principalId -g $RESOURCE_GROUP)" \ +--scope "/subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/" +``` + +Establish a federated identity between the Identity and the source-controller +ServiceAccount. 
+ +```shell +export SERVICE_ACCOUNT_ISSUER="$(az aks show --resource-group --name --query "oidcIssuerProfile.issuerUrl" -otsv)" + +az identity federated-credential create \ + --name "kubernetes-federated-credential" \ + --identity-name "${IDENTITY_NAME}" \ + --resource-group "${RESOURCE_GROUP}" \ + --issuer "${SERVICE_ACCOUNT_ISSUER}" \ + --subject "system:serviceaccount:flux-system:source-controller" +``` + +Add a patch to label and annotate the source-controller Deployment and ServiceAccount +correctly so that it can match an identity binding: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +If you have set up Workload Identity correctly and labeled the source-controller +Deployment and ServiceAccount, then you don't need to reference a Secret. For more information, +please see [documentation](https://azure.github.io/azure-workload-identity/docs/quick-start.html). + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-bucket + namespace: flux-system +spec: + interval: 5m0s + provider: azure + bucketName: testwi + endpoint: https://testfluxwi.blob.core.windows.net +``` + +##### Azure Object-Level Workload Identity example + +**Note:** To use Object-Level Workload Identity (`.spec.serviceAccountName` with +cloud providers), the controller feature gate `ObjectLevelWorkloadIdentity` must +be enabled. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-object-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: testwi + endpoint: https://testfluxwi.blob.core.windows.net + serviceAccountName: azure-workload-identity-sa + timeout: 30s +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: azure-workload-identity-sa + namespace: default + annotations: + azure.workload.identity/client-id: + azure.workload.identity/tenant-id: +``` + +##### Azure Blob SAS Token example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-sas-token + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-key +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-key + namespace: default +type: Opaque +data: + sasKey: +``` + +The `sasKey` only contains the SAS token e.g +`?sv=2020-08-0&ss=bfqt&srt=co&sp=rwdlacupitfx&se=2022-05-26T21:55:35Z&st=2022-05...`. +The leading question mark (`?`) is optional. The query values from the `sasKey` +data field in the Secrets gets merged with the ones in the `.spec.endpoint` of +the Bucket. If the same key is present in the both of them, the value in the +`sasKey` takes precedence. + +**Note:** The SAS token has an expiry date, and it must be updated before it +expires to allow Flux to continue to access Azure Storage. It is allowed to use +an account-level or container-level SAS token. 
+ +The minimum permissions for an account-level SAS token are: + +- Allowed services: `Blob` +- Allowed resource types: `Container`, `Object` +- Allowed permissions: `Read`, `List` + +The minimum permissions for a container-level SAS token are: + +- Allowed permissions: `Read`, `List` + +Refer to the [Azure documentation](https://learn.microsoft.com/en-us/rest/api/storageservices/create-account-sas#blob-service) for a full overview on permissions. + +#### GCP + +For detailed setup instructions, see: https://fluxcd.io/flux/integrations/gcp/#for-google-cloud-storage + +##### GCP Controller-Level Workload Identity example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: gcp-controller-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: gcp + bucketName: podinfo + endpoint: storage.googleapis.com + region: us-east-1 + timeout: 30s +``` + +##### GCP Object-Level Workload Identity example + +**Note:** To use Object-Level Workload Identity (`.spec.serviceAccountName` with +cloud providers), the controller feature gate `ObjectLevelWorkloadIdentity` must +be enabled. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: gcp-object-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: gcp + bucketName: podinfo + endpoint: storage.googleapis.com + region: us-east-1 + serviceAccountName: gcp-workload-identity-sa + timeout: 30s +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: gcp-workload-identity-sa + namespace: default + annotations: + iam.gke.io/gcp-service-account: +``` + +##### GCP static auth example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: gcp-secret + namespace: default +spec: + interval: 5m0s + provider: gcp + bucketName: + endpoint: storage.googleapis.com + region: + secretRef: + name: gcp-service-account +--- +apiVersion: v1 +kind: Secret +metadata: + name: gcp-service-account + namespace: default +type: Opaque +data: + serviceaccount: +``` + +Where the (base64 decoded) value of `.data.serviceaccount` looks like this: + +```json +{ + "type": "service_account", + "project_id": "example", + "private_key_id": "28qwgh3gdf5hj3gb5fj3gsu5yfgh34f45324568hy2", + "private_key": "-----BEGIN PRIVATE KEY-----\nHwethgy123hugghhhbdcu6356dgyjhsvgvGFDHYgcdjbvcdhbsx63c\n76tgycfehuhVGTFYfw6t7ydgyVgydheyhuggycuhejwy6t35fthyuhegvcetf\nTFUHGTygghubhxe65ygt6tgyedgy326hucyvsuhbhcvcsjhcsjhcsvgdtHFCGi\nHcye6tyyg3gfyuhchcsbhygcijdbhyyTF66tuhcevuhdcbhuhhvftcuhbh3uh7t6y\nggvftUHbh6t5rfthhuGVRtfjhbfcrd5r67yuhuvgFTYjgvtfyghbfcdrhyjhbfctfdfyhvfg\ntgvggtfyghvft6tugvTF5r66tujhgvfrtyhhgfct6y7ytfr5ctvghbhhvtghhjvcttfycf\nffxfghjbvgcgyt67ujbgvctfyhVC7uhvgcyjvhhjvyujc\ncgghgvgcfhgg765454tcfthhgftyhhvvyvvffgfryyu77reredswfthhgfcftycfdrttfhf/\n-----END PRIVATE KEY-----\n", + "client_email": "test@example.iam.gserviceaccount.com", + "client_id": "32657634678762536746", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": 
"https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%40podinfo.iam.gserviceaccount.com" +} +``` + +### Interval + +`.spec.interval` is a required field that specifies the interval which the +object storage bucket must be consulted at. + +After successfully reconciling a Bucket object, the source-controller requeues +the object for inspection after the specified interval. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to look at the object storage bucket every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. the apply of a +change to the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple Bucket objects are set up +with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### Endpoint + +`.spec.endpoint` is a required field that specifies the HTTP/S object storage +endpoint to connect to and fetch objects from. Connecting to an (insecure) +HTTP endpoint requires enabling [`.spec.insecure`](#insecure). + +Some endpoints require the specification of a [`.spec.region`](#region), +see [Provider](#provider) for more (provider specific) examples. + +### STS + +`.spec.sts` is an optional field for specifying the Security Token Service +configuration. A Security Token Service (STS) is a web service that issues +temporary security credentials. By adding this field, one may specify the +STS endpoint from where temporary credentials will be fetched. + +This field is only supported for the `aws` and `generic` bucket [providers](#provider). 
+ +If using `.spec.sts`, the following fields are required: + +- `.spec.sts.provider`, the Security Token Service provider. The only supported + option for the `generic` bucket provider is `ldap`. The only supported option + for the `aws` bucket provider is `aws`. +- `.spec.sts.endpoint`, the HTTP/S endpoint of the Security Token Service. In + the case of `aws` this can be `https://sts.amazonaws.com`, or a Regional STS + Endpoint, or an Interface Endpoint created inside a VPC. In the case of + `ldap` this must be the LDAP server endpoint. + +When using the `ldap` provider, the following fields may also be specified: + +- `.spec.sts.secretRef.name`, the name of the Secret containing the LDAP + credentials. The Secret must contain the following keys: + - `username`, the username to authenticate with. + - `password`, the password to authenticate with. +- `.spec.sts.certSecretRef.name`, the name of the Secret containing the + TLS configuration for communicating with the STS endpoint. The contents + of this Secret must follow the same structure as + [`.spec.certSecretRef.name`](#cert-secret-reference). + +If [`.spec.proxySecretRef.name`](#proxy-secret-reference) is specified, +the proxy configuration will be used for communicating with the STS endpoint. 
+ +Example for the `ldap` provider: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: example + namespace: example +spec: + interval: 5m + bucketName: example + provider: generic + endpoint: minio.example.com + sts: + provider: ldap + endpoint: https://ldap.example.com + secretRef: + name: ldap-credentials + certSecretRef: + name: ldap-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: ldap-credentials + namespace: example +type: Opaque +stringData: + username: + password: +--- +apiVersion: v1 +kind: Secret +metadata: + name: ldap-tls + namespace: example +type: kubernetes.io/tls # or Opaque +stringData: + tls.crt: + tls.key: + ca.crt: +``` + +### Bucket name + +`.spec.bucketName` is a required field that specifies which object storage +bucket on the [Endpoint](#endpoint) objects should be fetched from. + +See [Provider](#provider) for more (provider specific) examples. + +### Region + +`.spec.region` is an optional field to specify the region a +[`.spec.bucketName`](#bucket-name) is located in. + +See [Provider](#provider) for more (provider specific) examples. + +### Mutual TLS Authentication + +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data for mutual TLS authentication. + +To authenticate towards a bucket using mutual TLS, +the referenced Secret's `.data` should contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. +specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. + +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. 
Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls minio-tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +If TLS client authentication is not required, you can generate the secret with: + +```sh +flux create secret tls minio-tls --ca-crt-file=ca.crt +``` + +This API is only supported for the `generic` [provider](#provider). + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: example + namespace: example +spec: + interval: 5m + bucketName: example + provider: generic + endpoint: minio.example.com + certSecretRef: + name: minio-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-tls + namespace: example +type: kubernetes.io/tls # or Opaque +stringData: + tls.crt: + tls.key: + ca.crt: +``` + +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all the remote operations related to the Bucket. +The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. 
+ +Example: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. + +### Insecure + +`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) +[endpoint](#endpoint), if set to `true`. The default value is `false`, +denying insecure (HTTP) connections. + +### Timeout + +`.spec.timeout` is an optional field to specify a timeout for object storage +fetch operations. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. +The default value is `60s`. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the Bucket, containing authentication +credentials for the object storage. For some `.spec.provider` implementations +the presence of the field is required, see [Provider](#provider) for more +details and examples. + +### Service Account reference + +`.spec.serviceAccountName` is an optional field to specify a Service Account +in the same namespace as Bucket with purpose depending on the value of +the `.spec.provider` field: + +- When `.spec.provider` is set to `generic`, the controller will fetch the image + pull secrets attached to the Service Account and use them for authentication. +- When `.spec.provider` is set to `aws`, `azure`, or `gcp`, the Service Account + will be used for Workload Identity authentication. In this case, the controller + feature gate `ObjectLevelWorkloadIdentity` must be enabled, otherwise the + controller will error out. 
+ +**Note:** For a publicly accessible object storage bucket, you don't need to +provide a `secretRef` or a `serviceAccountName`. + +**Important:** `.spec.secretRef` and `.spec.serviceAccountName` are mutually +exclusive and cannot be set at the same time. This constraint is enforced +at the CRD level. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +### Prefix + +`.spec.prefix` is an optional field to enable server-side filtering +of files in the Bucket. + +**Note:** The server-side filtering works only with the `generic`, `aws` +and `gcp` [providers](#provider) and is preferred over [`.spec.ignore`](#ignore) +as a more efficient way of excluding files. + +### Ignore + +`.spec.ignore` is an optional field to specify rules in [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Storage +objects whose keys match the defined rules are excluded while fetching. + +When specified, `.spec.ignore` overrides the [default exclusion +list](#default-exclusions), and may overrule the [`.sourceignore` file +exclusions](#sourceignore-file). See [excluding files](#excluding-files) +for more information. + +### Suspend + +`.spec.suspend` is an optional field to suspend the reconciliation of a Bucket. +When set to `true`, the controller will stop reconciling the Bucket, and changes +to the resource or in the object storage bucket will not result in a new +Artifact. When the field is set to `false` or removed, it will resume. + +For practical information, see +[suspending and resuming](#suspending-and-resuming). + +## Working with Buckets + +### Excluding files + +By default, storage bucket objects which match the [default exclusion +rules](#default-exclusions) are excluded while fetching. It is possible to +overwrite and/or overrule the default exclusions using a file in the bucket +and/or an in-spec set of rules. 
+ +#### `.sourceignore` file + +Excluding files is possible by adding a `.sourceignore` file in the root of the +object storage bucket. The `.sourceignore` file follows [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format), and +pattern entries may overrule [default exclusions](#default-exclusions). + +#### Ignore spec + +Another option is to define the exclusions within the Bucket spec, using the +[`.spec.ignore` field](#ignore). Specified rules override the +[default exclusion list](#default-exclusions), and may overrule `.sourceignore` +file exclusions. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: +spec: + ignore: | + # exclude all + /* + # include deploy dir + !/deploy + # exclude file extensions from deploy dir + /deploy/**/*.md + /deploy/**/*.txt +``` + +### Triggering a reconcile + +To manually tell the source-controller to reconcile a Bucket outside the +[specified interval window](#interval), a Bucket can be annotated with +`reconcile.fluxcd.io/requestedAt: `. Annotating the resource +queues the Bucket for reconciliation if the `` differs from +the last value the controller acted on, as reported in +[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at). + +Using `kubectl`: + +```sh +kubectl annotate --field-manager=flux-client-side-apply --overwrite bucket/ reconcile.fluxcd.io/requestedAt="$(date +%s)" +``` + +Using `flux`: + +```sh +flux reconcile source bucket +``` + +### Waiting for `Ready` + +When a change is applied, it is possible to wait for the Bucket to reach a +[ready state](#ready-bucket) using `kubectl`: + +```sh +kubectl wait bucket/ --for=condition=ready --timeout=1m +``` + +### Suspending and resuming + +When you find yourself in a situation where you temporarily want to pause the +reconciliation of a Bucket, you can suspend it using the [`.spec.suspend` +field](#suspend). 
+ +#### Suspend a Bucket + +In your YAML declaration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: +spec: + suspend: true +``` + +Using `kubectl`: + +```sh +kubectl patch bucket --field-manager=flux-client-side-apply -p '{\"spec\": {\"suspend\" : true }}' +``` + +Using `flux`: + +```sh +flux suspend source bucket +``` + +**Note:** When a Bucket has an Artifact and is suspended, and this Artifact +later disappears from the storage due to e.g. the source-controller Pod being +evicted from a Node, this will not be reflected in the Bucket's Status until it +is resumed. + +#### Resume a Bucket + +In your YAML declaration, comment out (or remove) the field: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: +spec: + # suspend: true +``` + +**Note:** Setting the field value to `false` has the same effect as removing +it, but does not allow for "hot patching" using e.g. `kubectl` while practicing +GitOps; as the manually applied patch would be overwritten by the declared +state in Git. + +Using `kubectl`: + +```sh +kubectl patch bucket --field-manager=flux-client-side-apply -p '{\"spec\" : {\"suspend\" : false }}' +``` + +Using `flux`: + +```sh +flux resume source bucket +``` + +### Debugging a Bucket + +There are several ways to gather information about a Bucket for debugging +purposes. + +#### Describe the Bucket + +Describing a Bucket using `kubectl describe bucket ` displays the +latest recorded information for the resource in the `Status` and `Events` +sections: + +```console +... +Status: +... 
+ Conditions: + Last Transition Time: 2024-02-02T13:26:55Z + Message: processing object: new generation 1 -> 2 + Observed Generation: 2 + Reason: ProgressingWithRetry + Status: True + Type: Reconciling + Last Transition Time: 2024-02-02T13:26:55Z + Message: bucket 'my-new-bucket' does not exist + Observed Generation: 2 + Reason: BucketOperationFailed + Status: False + Type: Ready + Last Transition Time: 2024-02-02T13:26:55Z + Message: bucket 'my-new-bucket' does not exist + Observed Generation: 2 + Reason: BucketOperationFailed + Status: True + Type: FetchFailed + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/latest.tar.gz +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning BucketOperationFailed 37s (x11 over 42s) source-controller bucket 'my-new-bucket' does not exist +``` + +#### Trace emitted Events + +To view events for specific Bucket(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. For example, +running + +```sh +kubectl events --for Bucket/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +2m30s Normal NewArtifact bucket/ fetched 16 files with revision from 'my-new-bucket' +36s Normal ArtifactUpToDate bucket/ artifact up-to-date with remote revision: 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' +18s Warning BucketOperationFailed bucket/ bucket 'my-new-bucket' does not exist +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. The Flux CLI offer commands for filtering the logs for a +specific Bucket, e.g. `flux logs --level=error --kind=Bucket --name=`. + +## Bucket Status + +### Artifact + +The Bucket reports the latest synchronized state from the object storage +bucket as an Artifact object in the `.status.artifact` of the resource. 
+ +The Artifact file is a gzip compressed TAR archive +(`.tar.gz`), and can be retrieved in-cluster from the +`.status.artifact.url` HTTP address. + +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: +status: + artifact: + digest: sha256:cbec34947cc2f36dee8adcdd12ee62ca6a8a36699fc6e56f6220385ad5bd421a + lastUpdateTime: "2024-01-28T10:30:30Z" + path: bucket///c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz + revision: sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2 + size: 38099 + url: http://source-controller..svc.cluster.local./bucket///c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz +``` + +#### Default exclusions + +The following files and extensions are excluded from the Artifact by +default: + +- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`) +- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`) +- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`) +- CLI configs (`.goreleaser.yml, .sops.yaml`) +- Flux v1 config (`.flux.yaml`) + +To define your own exclusion rules, see [excluding files](#excluding-files). + +### Conditions + +A Bucket enters various states during its lifecycle, reflected as +[Kubernetes Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-bucket) while fetching storage objects, +it can be [ready](#ready-bucket), or it can [fail during +reconciliation](#failed-bucket). + +The Bucket API is compatible with the [kstatus specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the Bucket to become +`Ready`. 
+ +#### Reconciling Bucket + +The source-controller marks a Bucket as _reconciling_ when one of the following +is true: + +- There is no current Artifact for the Bucket, or the reported Artifact is + determined to have disappeared from the storage. +- The generation of the Bucket is newer than the [Observed Generation](#observed-generation). +- The newly calculated Artifact revision differs from the current Artifact. + +When the Bucket is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the Bucket's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, an additional Condition is +added with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the Bucket while their status value is `"True"`. + +#### Ready Bucket + +The source-controller marks a Bucket as _ready_ when it has the following +characteristics: + +- The Bucket reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The Bucket was able to communicate with the Bucket's object storage endpoint + using the current spec. +- The revision of the reported Artifact is up-to-date with the latest + calculated revision of the object storage bucket. + +When the Bucket is "ready", the controller sets a Condition with the following +attributes in the Bucket's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the Bucket +is marked as [reconciling](#reconciling-bucket), or e.g. a +[transient error](#failed-bucket) occurs due to a temporary network issue. 
+ +When the Bucket Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +Bucket's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed Bucket + +The source-controller may get stuck trying to produce an Artifact for a Bucket +without completing. This can occur due to some of the following factors: + +- The object storage [Endpoint](#endpoint) is temporarily unavailable. +- The specified object storage bucket does not exist. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- The credentials in the referenced Secret are invalid. +- The Bucket spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the Bucket's +`.status.conditions`: + +- `type: FetchFailed` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: BucketOperationFailed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the Bucket while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +While the Bucket has this Condition, the controller will continue to attempt +to produce an Artifact for the resource with an exponential backoff, until +it succeeds and the Bucket is marked as [ready](#ready-bucket). + +Note that a Bucket can be [reconciling](#reconciling-bucket) while failing at +the same time, for example due to a newly introduced configuration issue in the +Bucket spec. 
When a reconciliation fails, the `Reconciling` Condition reason +would be `ProgressingWithRetry`. When the reconciliation is performed again +after the failure, the reason is updated to `Progressing`. + +### Observed Ignore + +The source-controller reports an observed ignore in the Bucket's +`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value +which resulted in a [ready state](#ready-bucket), or stalled due to error +it can not recover from without human intervention. The value is the same as the +[ignore in spec](#ignore). It indicates the ignore rules used in building the +current artifact in storage. + +Example: +```yaml +status: + ... + observedIgnore: | + hpa.yaml + build + ... +``` + +### Observed Generation + +The source-controller reports an +[observed generation][typical-status-properties] +in the Bucket's `.status.observedGeneration`. The observed generation is the +latest `.metadata.generation` which resulted in either a [ready state](#ready-bucket), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1/externalartifacts.md b/docs/spec/v1/externalartifacts.md new file mode 100644 index 000000000..1eccbe0e0 --- /dev/null +++ b/docs/spec/v1/externalartifacts.md @@ -0,0 +1,114 @@ +# External Artifacts + + + +The `ExternalArtifact` is a generic API designed for interoperability with Flux. 
+It allows 3rd party controllers to produce and store [Artifact](#artifact) objects
+in the same way as Flux's own source-controller.
+For more details on the design and motivation behind this API,
+see [RFC-0012](https://github.com/fluxcd/flux2/tree/main/rfcs/0012-external-artifact).
+
+## Example
+
+The following is an example of an ExternalArtifact produced by a 3rd party
+source controller:
+
+```yaml
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: ExternalArtifact
+metadata:
+  name: my-artifact
+  namespace: flux-system
+spec:
+  sourceRef:
+    apiVersion: example.com/v1
+    kind: Source
+    name: my-source
+status:
+  artifact:
+    digest: sha256:35d47c9db0eee6ffe08a404dfb416bee31b2b79eabc3f2eb26749163ce487f52
+    lastUpdateTime: "2025-08-21T13:37:31Z"
+    path: source/flux-system/my-source/35d47c9d.tar.gz
+    revision: v1.0.0@sha256:35d47c9db0eee6ffe08a404dfb416bee31b2b79eabc3f2eb26749163ce487f52
+    size: 20914
+    url: http://example-controller.flux-system.svc.cluster.local./source/flux-system/my-source/35d47c9d.tar.gz
+  conditions:
+  - lastTransitionTime: "2025-08-21T13:37:31Z"
+    message: stored artifact for revision v1.0.0
+    observedGeneration: 1
+    reason: Succeeded
+    status: "True"
+    type: Ready
+```
+
+## ExternalArtifact spec
+
+### Source reference
+
+The `spec.sourceRef` field is optional and contains a reference
+to the custom resource that the ExternalArtifact is based on.
+
+The `spec.sourceRef` contains the following fields:
+
+- `apiVersion`: the API version of the custom resource.
+- `kind`: the kind of the custom resource.
+- `name`: the name of the custom resource.
+- `namespace`: the namespace of the custom resource. If omitted, it defaults to the
+  namespace of the ExternalArtifact.
+
+## ExternalArtifact status
+
+### Artifact
+
+The ExternalArtifact reports the latest synchronized state
+as an Artifact object in the `.status.artifact`.
+
+The `.status.artifact` contains the following fields:
+
+- `digest`: The checksum of the tar.gz file in the format `<algo>:<checksum>`.
+- `lastUpdateTime`: Timestamp of the last artifact update.
+- `path`: Relative file path of the artifact in storage.
+- `revision`: Human-readable identifier with version and checksum in the format `<version>@<algo>:<checksum>`.
+- `size`: Number of bytes in the tar.gz file.
+- `url`: In-cluster HTTP address for artifact retrieval.
+
+### Conditions
+
+The ExternalArtifact reports its status using Kubernetes standard conditions.
+
+#### Ready ExternalArtifact
+
+When the 3rd party controller has successfully produced and stored an
+Artifact in storage, it sets a Condition with the following
+attributes in the ExternalArtifact's `.status.conditions`:
+
+- `type: Ready`
+- `status: "True"`
+- `reason: Succeeded`
+
+The `message` field should contain a human-readable message indicating
+the successful storage of the artifact and the associated revision.
+
+If the 3rd party controller performs a signature verification
+of the artifact, and the verification is successful, a Condition with the
+following attributes is added to the ExternalArtifact's `.status.conditions`:
+
+- `type: SourceVerified`
+- `status: "True"`
+- `reason: Succeeded`
+
+The `message` field should contain a human-readable message indicating
+the successful verification of the artifact and the associated verification method.
+
+#### Failed ExternalArtifact
+
+If the 3rd party controller fails to produce and store an Artifact,
+it sets the `Ready` Condition status to `False`, and adds a Condition with
+the following attributes to the ExternalArtifact's `.status.conditions`:
+
+- `type: Ready`
+- `status: "False"`
+- `reason: FetchFailed` | `reason: StorageOperationFailed` | `reason: VerificationFailed`
+
+The `message` field should contain a human-readable message indicating
+the reason for the failure.
diff --git a/docs/spec/v1/gitrepositories.md b/docs/spec/v1/gitrepositories.md index 4170d9f1b..d39ee73d3 100644 --- a/docs/spec/v1/gitrepositories.md +++ b/docs/spec/v1/gitrepositories.md @@ -177,6 +177,31 @@ data: ca.crt: ``` +#### HTTPS Mutual TLS authentication + +To authenticate towards a Git repository over HTTPS using mutual TLS, +the referenced Secret's `.data` should contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used + for TLS client authentication. These must be used in conjunction, i.e. + specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is + required if the server is using a self-signed certificate. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: https-tls-certs + namespace: default +type: Opaque +data: + tls.crt: + tls.key: + ca.crt: +``` + #### SSH authentication To authenticate towards a Git repository over SSH, the referenced Secret is @@ -212,6 +237,180 @@ For password-protected SSH private keys, the password must be provided via an additional `password` field in the secret. Flux CLI also supports this via the `--password` flag. +### Provider + +`.spec.provider` is an optional field that allows specifying an OIDC provider +used for authentication purposes. + +Supported options are: + +- `generic` +- `azure` +- `github` + +When provider is not specified, it defaults to `generic` indicating that +mechanisms using `spec.secretRef` are used for authentication. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +#### Azure + +The `azure` provider can be used to authenticate to Azure DevOps repositories +automatically using Workload Identity. 
+ +##### Pre-requisites + +- Ensure that your Azure DevOps Organization is + [connected](https://learn.microsoft.com/en-us/azure/devops/organizations/accounts/connect-organization-to-azure-ad?view=azure-devops) + to Microsoft Entra. +- Ensure Workload Identity is properly [set up on your + cluster](https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster#create-an-aks-cluster). + +##### Configure Flux controller + +- Create a managed identity to access Azure DevOps. Establish a federated + identity credential between the managed identity and the source-controller + service account. In the default installation, the source-controller service + account is located in the `flux-system` namespace with name + `source-controller`. Ensure the federated credential uses the correct + namespace and name of the source-controller service account. For more details, + please refer to this + [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). + +- Add the managed identity to the Azure DevOps organization as a user. Ensure + that the managed identity has the necessary permissions to access the Azure + DevOps repository as described + [here](https://learn.microsoft.com/en-us/azure/devops/integrate/get-started/authentication/service-principal-managed-identity?view=azure-devops#2-add-and-manage-service-principals-in-an-azure-devops-organization). 
+ +- Add the following patch to your bootstrap repository in + `flux-system/kustomization.yaml` file: + + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +**Note:** When azure `provider` is used with `GitRepository`, the `.spec.url` +must follow this format: + +``` +https://dev.azure.com/{your-organization}/{your-project}/_git/{your-repository} +``` +#### GitHub + +The `github` provider can be used to authenticate to Git repositories using +[GitHub Apps](https://docs.github.com/en/apps/overview). + +##### Pre-requisites + +- [Register](https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app) + the GitHub App with the necessary permissions and [generate a private + key](https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/managing-private-keys-for-github-apps) + for the app. + +- [Install](https://docs.github.com/en/apps/using-github-apps/installing-your-own-github-app) + the app in the organization/account configuring access to the necessary + repositories. + +##### Configure GitHub App secret + +The GitHub App information is specified in `.spec.secretRef` in the format +specified below: + +- Get the App ID from the app settings page at + `https://github.com/settings/apps/`. 
+- Get the App Installation ID from the app installations page at +`https://github.com/settings/installations`. Click the installed app, the URL +will contain the installation ID +`https://github.com/settings/installations/`. For +organizations, the first part of the URL may be different, but it follows the +same pattern. +- The private key that was generated in the pre-requisites. +- (Optional) GitHub Enterprise Server users can set the base URL to + `http(s)://HOSTNAME/api/v3`. +- (Optional) If GitHub Enterprise Server uses a private CA, include its bundle (root and any intermediates) in `ca.crt`. + If the `ca.crt` is specified, then it will be used for TLS verification for all API / Git over `HTTPS` requests to the GitHub Enterprise Server. + +**NOTE:** If the secret contains `tls.crt`, `tls.key` then [mutual TLS configuration](#https-mutual-tls-authentication) will be automatically enabled. +Omit these keys if the GitHub server does not support mutual TLS. + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: github-sa +type: Opaque +stringData: + githubAppID: "" + githubAppInstallationID: "" + githubAppPrivateKey: | + -----BEGIN RSA PRIVATE KEY----- + ... + -----END RSA PRIVATE KEY----- + githubAppBaseURL: "" #optional, required only for GitHub Enterprise Server users + ca.crt: | #optional, for GitHub Enterprise Server users + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- +``` + +Alternatively, the Flux CLI can be used to automatically create the secret with +the github app authentication information. 
+ +```sh +flux create secret githubapp ghapp-secret \ + --app-id=1 \ + --app-installation-id=3 \ + --app-private-key=~/private-key.pem +``` + +### Service Account reference + +`.spec.serviceAccountName` is an optional field to specify a Service Account +in the same namespace as GitRepository with purpose depending on the value of +the `.spec.provider` field: + +- When `.spec.provider` is set to `azure`, the Service Account + will be used for Workload Identity authentication. In this case, the controller + feature gate `ObjectLevelWorkloadIdentity` must be enabled, otherwise the + controller will error out. For Azure DevOps specific setup, see the + [Azure DevOps integration guide](https://fluxcd.io/flux/integrations/azure/#for-azure-devops). + +**Note:** that for a publicly accessible git repository, you don't need to +provide a `secretRef` nor `serviceAccountName`. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + ### Interval `.spec.interval` is a required field that specifies the interval at which the @@ -446,6 +645,28 @@ list](#default-exclusions), and may overrule the [`.sourceignore` file exclusions](#sourceignore-file). See [excluding files](#excluding-files) for more information. +### Sparse checkout + +`.spec.sparseCheckout` is an optional field to specify list of directories to +checkout when cloning the repository. If specified, only the specified directory +contents will be present in the artifact produced for this repository. + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m + url: https://github.com/stefanprodan/podinfo + ref: + branch: master + sparseCheckout: + - charts + - kustomize +``` + ### Suspend `.spec.suspend` is an optional field to suspend the reconciliation of a @@ -988,6 +1209,27 @@ status: ... 
``` +### Observed Sparse Checkout + +The source-controller reports observed sparse checkout in the GitRepository's +`.status.observedSparseCheckout`. The observed sparse checkout is the latest +`.spec.sparseCheckout` value which resulted in a [ready +state](#ready-gitrepository), or stalled due to error it can not recover from +without human intervention. The value is the same as the [sparseCheckout in +spec](#sparse-checkout). It indicates the sparse checkout configuration used in +building the current artifact in storage. It is also used by the controller to +determine if an artifact needs to be rebuilt. + +Example: +```yaml +status: + ... + observedSparseCheckout: + - charts + - kustomize + ... +``` + ### Source Verification Mode The source-controller reports the Git object(s) it verified in the Git diff --git a/docs/spec/v1/helmcharts.md b/docs/spec/v1/helmcharts.md new file mode 100644 index 000000000..eae4d5b9c --- /dev/null +++ b/docs/spec/v1/helmcharts.md @@ -0,0 +1,865 @@ +# Helm Charts + + + +The `HelmChart` API defines a Source to produce an Artifact for a Helm chart +archive with a set of specific configurations. + +## Example + +The following is an example of a HelmChart. It fetches and/or packages a Helm +chart and exposes it as a tarball (`.tgz`) Artifact for the specified +configuration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + chart: podinfo + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: podinfo + version: '5.*' +``` + +In the above example: + +- A HelmChart named `podinfo` is created, indicated by the `.metadata.name` + field. +- The source-controller fetches the Helm chart every five minutes from the + `podinfo` HelmRepository source reference, indicated by the + `.spec.sourceRef.kind` and `.spec.sourceRef.name` fields. 
+- The fetched Helm chart version is the latest available chart + version in the range specified in `spec.version`. This version is also used as + Artifact revision, reported in-cluster in the `.status.artifact.revision` + field. +- When the current Helm Chart version differs from the latest available chart + in the version range, it is fetched and/or packaged as a new Artifact. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `helmchart.yaml`. + +**Note:** HelmChart is usually used by the helm-controller. Based on the +HelmRelease configuration, an associated HelmChart is created by the +helm-controller. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f helmchart.yaml + ``` + +2. Run `kubectl get helmchart` to see the HelmChart: + + ```console + NAME CHART VERSION SOURCE KIND SOURCE NAME AGE READY STATUS + podinfo podinfo 5.* HelmRepository podinfo 53s True pulled 'podinfo' chart with version '5.2.1' + ``` + +3. 
Run `kubectl describe helmchart podinfo` to see the [Artifact](#artifact) and + [Conditions](#conditions) in the HelmChart's Status: + + ```console + Status: + Observed Source Artifact Revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Artifact: + Digest: sha256:6c3cc3b955bce1686036ae6822ee2ca0ef6ecb994e3f2d19eaf3ec03dcba84b3 + Last Update Time: 2022-02-13T11:24:10Z + Path: helmchart/default/podinfo/podinfo-5.2.1.tgz + Revision: 5.2.1 + Size: 14166 + URL: http://source-controller.flux-system.svc.cluster.local./helmchart/default/podinfo/podinfo-5.2.1.tgz + Conditions: + Last Transition Time: 2022-02-13T11:24:10Z + Message: pulled 'podinfo' chart with version '5.2.1' + Observed Generation: 1 + Reason: ChartPullSucceeded + Status: True + Type: Ready + Last Transition Time: 2022-02-13T11:24:10Z + Message: pulled 'podinfo' chart with version '5.2.1' + Observed Generation: 1 + Reason: ChartPullSucceeded + Status: True + Type: ArtifactInStorage + Observed Chart Name: podinfo + Observed Generation: 1 + URL: http://source-controller.flux-system.svc.cluster.local./helmchart/default/podinfo/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ChartPullSucceeded 2m51s source-controller pulled 'podinfo' chart with version '5.2.1' + ``` + +## Writing a HelmChart spec + +As with all other Kubernetes config, a HelmChart needs `apiVersion`, `kind`, and +`metadata` fields. The name of a HelmChart object must be a valid +[DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A HelmChart also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### Source reference + +`.spec.sourceRef` is a required field that specifies a reference to the Source +the chart is available at. 
+
+Supported references are:
+- [`HelmRepository`](helmrepositories.md)
+- [`GitRepository`](gitrepositories.md)
+- [`Bucket`](buckets.md)
+
+Although there are three kinds of source references, there are only two
+underlying implementations. The artifact building process for `GitRepository`
+and `Bucket` is the same as they are already built source artifacts. In case
+of `HelmRepository`, a chart is fetched and/or packaged based on the
+configuration of the Helm chart.
+
+For a `HelmChart` to be reconciled, the associated artifact in the source
+reference must be ready. If the source artifact is not ready, the `HelmChart`
+reconciliation is retried.
+
+When the `metadata.generation` of the `HelmChart` doesn't match the
+`status.observedGeneration`, the chart is fetched from source and/or packaged.
+If there's no `.spec.valuesFiles` specified, the chart is only fetched from the
+source, and not packaged. If `.spec.valuesFiles` are specified, the chart is
+fetched and packaged with the values files. When the `metadata.generation`
+matches the `status.observedGeneration`, the chart is only fetched from source
+or from the cache if available, and not packaged.
+
+When using a `HelmRepository` source reference, the secret reference defined in
+the Helm repository is used to fetch the chart.
+
+The HelmChart reconciliation behavior varies depending on the source reference
+kind, see [reconcile strategy](#reconcile-strategy).
+
+The attributes of the generated artifact also vary depending on the source
+reference kind, see [artifact](#artifact).
+
+### Chart
+
+`.spec.chart` is a required field that specifies the name or path the Helm chart
+is available at in the [Source reference](#source-reference).
+
+For `HelmRepository` Source reference, it'll be just the name of the chart.
+ +```yaml +spec: + chart: podinfo + sourceRef: + name: podinfo + kind: HelmRepository +``` + +For `GitRepository` and `Bucket` Source reference, it'll be the path to the +Helm chart directory. + +```yaml +spec: + chart: ./charts/podinfo + sourceRef: + name: podinfo + kind: +``` + +### Version + +`.spec.version` is an optional field to specify the version of the chart in +semver. It is applicable only when the Source reference is a `HelmRepository`. +It is ignored for `GitRepository` and `Bucket` Source reference. It defaults to +the latest version of the chart with value `*`. + +Version can be a fixed semver, minor or patch semver range of a specific +version (i.e. `4.0.x`) or any semver range (i.e. `>=4.0.0 <5.0.0`). + +### Values files + +`.spec.valuesFiles` is an optional field to specify an alternative list of +values files to use as the chart values (values.yaml). The file paths are +expected to be relative to the Source reference. Values files are merged in the +order of the list with the last file overriding the first. It is ignored when +omitted. When values files are specified, the chart is fetched and packaged +with the provided values. + +```yaml +spec: + chart: + spec: + chart: podinfo + ... + valuesFiles: + - values.yaml + - values-production.yaml +``` + +Values files also affect the generated artifact revision, see +[artifact](#artifact). + +### Ignore missing values files + +`.spec.ignoreMissingValuesFiles` is an optional field to specify whether missing +values files should be ignored rather than be considered errors. It defaults to +`false`. + +When `.spec.valuesFiles` and `.spec.ignoreMissingValuesFiles` are specified, +the `.status.observedValuesFiles` field is populated with the list of values +files that were found and actually contributed to the packaged chart. + +### Reconcile strategy + +`.spec.reconcileStrategy` is an optional field to specify what enables the +creation of a new Artifact. Valid values are `ChartVersion` and `Revision`. 
+`ChartVersion` is used for creating a new artifact when the chart version +changes in a `HelmRepository`. `Revision` is used for creating a new artifact +when the source revision changes in a `GitRepository` or a `Bucket` Source. It +defaults to `ChartVersion`. + +**Note:** If the reconcile strategy is `ChartVersion` and the source reference +is a `GitRepository` or a `Bucket`, no new chart artifact is produced on updates +to the source unless the `version` in `Chart.yaml` is incremented. To produce +new chart artifact on change in source revision, set the reconcile strategy to +`Revision`. + +Reconcile strategy also affects the artifact version, see [artifact](#artifact) +for more details. + +### Interval + +`.spec.interval` is a required field that specifies the interval at which the +Helm Chart source must be checked for updates. + +After successfully reconciling a HelmChart object, the source-controller +requeues the object for inspection after the specified interval. The value must +be in a [Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to look at the source for updates every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. applying a +change to the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple HelmChart objects are set +up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### Suspend + +`.spec.suspend` is an optional field to suspend the reconciliation of a +HelmChart. When set to `true`, the controller will stop reconciling the +HelmChart, and changes to the resource or the Helm chart Source will not result +in a new Artifact. When the field is set to `false` or removed, it will resume. 
+ +For practical information, see +[suspending and resuming](#suspending-and-resuming). + +### Verification + +**Note:** This feature is available only for Helm charts fetched from an OCI Registry. + +`.spec.verify` is an optional field to enable the verification of [Cosign](https://github.com/sigstore/cosign) or [Notation](https://github.com/notaryproject/notation) +signatures. The field offers three subfields: + +- `.provider`, to specify the verification provider. The supported options are `cosign` and `notation` at present. +- `.secretRef.name`, to specify a reference to a Secret in the same namespace as + the HelmChart, containing the public keys of trusted authors. For Notation this Secret should also include the [trust policy](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/trust-store-trust-policy.md#trust-policy) in + addition to the CA certificate. +- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers (only supported when using `cosign` as the verification provider). Please see + [Keyless verification](#keyless-verification) for more details. + +#### Cosign + +The `cosign` provider can be used to verify the signature of an OCI artifact using either a known public key or via the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo +spec: + verify: + provider: cosign + secretRef: + name: cosign-public-keys +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the HelmChart's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +##### Public keys verification + +To verify the authenticity of HelmChart hosted in an OCI Registry, create a Kubernetes +secret with the Cosign public keys: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: cosign-public-keys +type: Opaque +data: + key1.pub: + key2.pub: +``` + +Note that the keys must have the `.pub` extension for Flux to make use of them. + +Flux will loop over the public keys and use them to verify a HelmChart's signature. +This allows for older HelmCharts to be valid as long as the right key is in the secret. + +##### Keyless verification + +For publicly available HelmCharts, which are signed using the +[Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure, +you can enable the verification by omitting the `.verify.secretRef` field. + +To verify the identity's subject and the OIDC issuer present in the Fulcio +certificate, you can specify a list of OIDC identity matchers using +`.spec.verify.matchOIDCIdentity`. The matcher provides two required fields: + +- `.issuer`, to specify a regexp that matches against the OIDC issuer. +- `.subject`, to specify a regexp that matches against the subject identity in + the certificate. +Both values should follow the [Go regular expression syntax](https://golang.org/s/re2syntax). + +The matchers are evaluated in an OR fashion, i.e. the identity is deemed to be +verified if any one matcher successfully matches against the identity. 
+ +Example of verifying HelmCharts signed by the +[Cosign GitHub Action](https://github.com/sigstore/cosign-installer) with GitHub OIDC Token: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: podinfo +spec: + interval: 5m + chart: podinfo + reconcileStrategy: ChartVersion + sourceRef: + kind: HelmRepository + name: podinfo + version: ">=6.1.6" + verify: + provider: cosign + matchOIDCIdentity: + - issuer: "^https://token.actions.githubusercontent.com$" + subject: "^https://github.com/stefanprodan/podinfo.*$" +``` + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo +spec: + interval: 1m0s + url: oci://ghcr.io/stefanprodan/charts + type: "oci" +``` + +The controller verifies the signatures using the Fulcio root CA and the Rekor +instance hosted at [rekor.sigstore.dev](https://rekor.sigstore.dev/). + +Note that keyless verification is an **experimental feature**, using +custom root CAs or self-hosted Rekor instances are not currently supported. + +#### Notation + +The `notation` provider can be used to verify the signature of an OCI artifact using known +trust policy and CA certificate. 
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmChart
+metadata:
+  name: podinfo
+spec:
+  verify:
+    provider: notation
+    secretRef:
+      name: notation-config
+```
+
+When the verification succeeds, the controller adds a Condition with the
+following attributes to the HelmChart's `.status.conditions`:
+
+- `type: SourceVerified`
+- `status: "True"`
+- `reason: Succeeded`
+
+To verify the authenticity of an OCI artifact, create a Kubernetes secret
+containing Certificate Authority (CA) root certificates and a `trust policy`:
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: notation-config
+type: Opaque
+data:
+  certificate1.pem: <BASE64>
+  certificate2.crt: <BASE64>
+  trustpolicy.json: <BASE64>
+```
+
+Note that the CA certificates must have either `.pem` or `.crt` extension and your trust policy must
+be named `trustpolicy.json` for Flux to make use of them.
+
+For more information on the signing and verification process see [Signing and Verification Workflow](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/signing-and-verification-workflow.md).
+
+Flux will loop over the certificates and use them to verify an artifact's signature.
+This allows for older artifacts to be valid as long as the right certificate is in the secret.
+
+## Working with HelmCharts
+
+### Triggering a reconcile
+
+To manually tell the source-controller to reconcile a HelmChart outside the
+[specified interval window](#interval), a HelmChart can be annotated with
+`reconcile.fluxcd.io/requestedAt: <arbitrary value>`. Annotating the resource
+queues the object for reconciliation if the `<arbitrary value>` differs from
+the last value the controller acted on, as reported in
+[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).
+ +Using `kubectl`: + +```sh +kubectl annotate --field-manager=flux-client-side-apply --overwrite helmchart/ reconcile.fluxcd.io/requestedAt="$(date +%s)" +``` + +### Waiting for `Ready` + +When a change is applied, it is possible to wait for the HelmChart to reach a +[ready state](#ready-helmchart) using `kubectl`: + +```sh +kubectl wait helmchart/ --for=condition=ready --timeout=1m +``` + +### Suspending and resuming + +When you find yourself in a situation where you temporarily want to pause the +reconciliation of a HelmChart, you can suspend it using the +[`.spec.suspend` field](#suspend). + +#### Suspend a HelmChart + +In your YAML declaration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: +spec: + suspend: true +``` + +Using `kubectl`: + +```sh +kubectl patch helmchart --field-manager=flux-client-side-apply -p '{\"spec\": {\"suspend\" : true }}' +``` + +**Note:** When a HelmChart has an Artifact and is suspended, and this +Artifact later disappears from the storage due to e.g. the source-controller +Pod being evicted from a Node, this will not be reflected in the +HelmChart's Status until it is resumed. + +#### Resume a HelmChart + +In your YAML declaration, comment out (or remove) the field: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: +spec: + # suspend: true +``` + +**Note:** Setting the field value to `false` has the same effect as removing +it, but does not allow for "hot patching" using e.g. `kubectl` while practicing +GitOps; as the manually applied patch would be overwritten by the declared +state in Git. + +Using `kubectl`: + +```sh +kubectl patch helmchart --field-manager=flux-client-side-apply -p '{\"spec\" : {\"suspend\" : false }}' +``` + +### Debugging a HelmChart + +There are several ways to gather information about a HelmChart for debugging +purposes. 
+ +#### Describe the HelmChart + +Describing a HelmChart using `kubectl describe helmchart ` displays +the latest recorded information for the resource in the `Status` and `Events` +sections: + +```console +... +Status: +... + Conditions: + Last Transition Time: 2022-02-13T14:06:27Z + Message: invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found + Observed Generation: 3 + Reason: InvalidChartReference + Status: True + Type: Stalled + Last Transition Time: 2022-02-13T14:06:27Z + Message: invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found + Observed Generation: 3 + Reason: InvalidChartReference + Status: False + Type: Ready + Last Transition Time: 2022-02-13T14:06:27Z + Message: invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found + Observed Generation: 3 + Reason: InvalidChartReference + Status: True + Type: FetchFailed + Last Handled Reconcile At: 1644759954 + Observed Chart Name: podinfo + Observed Generation: 3 + URL: http://source-controller.flux-system.svc.cluster.local./helmchart/default/podinfo/latest.tar.gz +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning InvalidChartReference 11s source-controller invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with ver +sion matching '9.*' found +``` + +#### Trace emitted Events + +To view events for specific HelmChart(s), `kubectl events` can be used in +combination with `--for` to list the Events for specific objects. 
For example, +running + +```sh +kubectl events --for HelmChart/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +22s Warning InvalidChartReference helmchart/ invalid chart reference: failed to get chart version for remote reference: no 'podinfo' chart with version matching '9.*' found +2s Normal ChartPullSucceeded helmchart/ pulled 'podinfo' chart with version '6.0.3' +2s Normal ArtifactUpToDate helmchart/ artifact up-to-date with remote revision: '6.0.3' +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. The Flux CLI offers commands for filtering the logs for a +specific HelmChart, e.g. `flux logs --level=error --kind=HelmChart --name=`. + +### Improving resource consumption by enabling the cache + +When using a `HelmRepository` as Source for a `HelmChart`, the controller loads +the repository index in memory to find the latest version of the chart. + +The controller can be configured to cache Helm repository indexes in memory. +The cache is used to avoid loading repository indexes for every `HelmChart` +reconciliation. + +The following flags are provided to enable and configure the cache: +- `helm-cache-max-size`: The maximum size of the cache in number of indexes. + If `0`, then the cache is disabled. +- `helm-cache-ttl`: The TTL of an index in the cache. +- `helm-cache-purge-interval`: The interval at which the cache is purged of + expired items. + +The caching strategy is to pull a repository index from the cache if it is +available, otherwise to load the index, retrieve and build the chart, +then cache the index. The cached index TTL is refreshed every time the +Helm repository index is loaded with the `helm-cache-ttl` value. + +The cache is purged of expired items every `helm-cache-purge-interval`. + +When the cache is full, no more items can be added to the cache, and the +source-controller will report a warning event instead.
+ +In order to use the cache, set the related flags in the source-controller +Deployment config: + +```yaml + spec: + containers: + - args: + - --watch-all-namespaces + - --log-level=info + - --log-encoding=json + - --enable-leader-election + - --storage-path=/data + - --storage-adv-addr=source-controller.$(RUNTIME_NAMESPACE).svc.cluster.local. + ## Helm cache with up to 10 items, i.e. 10 indexes. + - --helm-cache-max-size=10 + ## TTL of an index is 1 hour. + - --helm-cache-ttl=1h + ## Purge expired index every 10 minutes. + - --helm-cache-purge-interval=10m +``` + +## HelmChart Status + +### Artifact + +The HelmChart reports the last built chart as an Artifact object in the +`.status.artifact` of the resource. + +The Artifact file is a gzip compressed TAR archive (`-.tgz`), +and can be retrieved in-cluster from the `.status.artifact.url` HTTP address. + +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: +status: + artifact: + digest: sha256:e30b95a08787de69ffdad3c232d65cfb131b5b50c6fd44295f48a078fceaa44e + lastUpdateTime: "2022-02-10T18:53:47Z" + path: helmchart///-.tgz + revision: 6.0.3 + size: 14166 + url: http://source-controller.flux-system.svc.cluster.local./helmchart///-.tgz +``` + +When using a `HelmRepository` as the source reference and values files are +provided, the value of `status.artifact.revision` is the chart version combined +with the `HelmChart` object generation. For example, if the chart version is +`6.0.3` and the `HelmChart` object generation is `1`, the +`status.artifact.revision` value will be `6.0.3+1`. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: +status: + artifact: + digest: sha256:ee68224ded207ebb18a8e9730cf3313fa6bc1f31e6d8d3943ab541113559bb52 + lastUpdateTime: "2022-02-28T08:07:12Z" + path: helmchart///-6.0.3+1.tgz + revision: 6.0.3+1 + size: 14166 + url: http://source-controller.flux-system.svc.cluster.local./helmchart///-6.0.3+1.tgz + observedGeneration: 1 + ... +``` + +When using a `GitRepository` or a `Bucket` as the source reference and +`Revision` as the reconcile strategy, the value of `status.artifact.revision` is +the chart version combined with the first 12 characters of the revision of the +`GitRepository` or `Bucket`. For example if the chart version is `6.0.3` and the +revision of the `Bucket` is `4e5cbb7b97d00a8039b8810b90b922f4256fd3bd8f78b934b4892dae13f7ca87`, +the `status.artifact.revision` value will be `6.0.3+4e5cbb7b97d0`. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmChart +metadata: + name: +status: + artifact: + digest: sha256:8d1f0ac3f4b0e8759a32180086f17ac87ca04e5d46c356e67f97e97616ef4718 + lastUpdateTime: "2022-02-28T08:07:12Z" + path: helmchart///-6.0.3+4e5cbb7b97d0.tgz + revision: 6.0.3+4e5cbb7b97d0 + size: 14166 + url: http://source-controller.flux-system.svc.cluster.local./helmchart///-6.0.3+4e5cbb7b97d0.tgz +``` + +### Conditions + +A HelmChart enters various states during its lifecycle, reflected as [Kubernetes +Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-helmchart) while fetching or building the +chart, it can be [ready](#ready-helmchart), it can +[fail during reconciliation](#failed-helmchart), or it can +[stall](#stalled-helmchart). + +The HelmChart API is compatible with the [kstatus +specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the HelmChart to become +`Ready`. 
+ +#### Reconciling HelmChart + +The source-controller marks a HelmChart as _reconciling_ when one of the +following is true: + +- There is no current Artifact for the HelmChart, or the reported Artifact is + determined to have disappeared from the storage. +- The generation of the HelmChart is newer than the [Observed + Generation](#observed-generation). +- The newly fetched Artifact revision differs from the current Artifact. + +When the HelmChart is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the HelmChart's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new version, it adds an additional +Condition with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewChart` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the HelmChart while their status value is `"True"`. + +#### Ready HelmChart + +The source-controller marks a HelmChart as _ready_ when it has the following +characteristics: + +- The HelmChart reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to fetch and build the Helm chart using the current + spec. +- The version/revision of the reported Artifact is up-to-date with the + latest version/revision of the Helm chart. + +When the HelmChart is "ready", the controller sets a Condition with the +following attributes in the HelmChart's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +HelmChart is marked as [reconciling](#reconciling-helmchart), or e.g. +a [transient error](#failed-helmchart) occurs due to a temporary network issue. 
+ +When the HelmChart Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +HelmChart's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed HelmChart + +The source-controller may get stuck trying to produce an Artifact for a +HelmChart without completing. This can occur due to some of the following +factors: + +- The Helm chart Source is temporarily unavailable. +- The credentials in the [Source reference](#source-reference) Secret are + invalid. +- The HelmChart spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the HelmChart's +`.status.conditions`: + +- `type: FetchFailed` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: StorageOperationFailed` | `reason: URLInvalid` | `reason: IllegalPath` | `reason: Failed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the HelmChart while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +While the HelmChart has this Condition, the controller will continue to +attempt to produce an Artifact for the resource with an exponential backoff, +until it succeeds and the HelmChart is marked as [ready](#ready-helmchart). + +Note that a HelmChart can be [reconciling](#reconciling-helmchart) +while failing at the same time, for example due to a newly introduced +configuration issue in the HelmChart spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. 
When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. + +#### Stalled HelmChart + +The source-controller can mark a HelmChart as _stalled_ when it determines that +without changes to the spec, the reconciliation can not succeed. +For example because a HelmChart Version is set to a non-existing version. + +When this happens, the controller sets the same Conditions as when it +[fails](#failed-helmchart), but adds another Condition with the following +attributes to the HelmChart's `.status.conditions`: + +- `type: Stalled` +- `status: "True"` +- `reason: InvalidChartReference` + +While the HelmChart has this Condition, the controller will not requeue the +resource any further, and will stop reconciling the resource until a change to +the spec is made. + +### Observed Source Artifact Revision + +The source-controller reports the revision of the last +[Source reference's](#source-reference) Artifact the current chart was fetched +from in the HelmChart's `.status.observedSourceArtifactRevision`. It is used to +keep track of the source artifact revision and detect when a new source +artifact is available. + +### Observed Chart Name + +The source-controller reports the last resolved chart name of the Artifact +for the [`.spec.chart` field](#chart) in the HelmChart's +`.status.observedChartName`. It is used to keep track of the chart and detect +when a new chart is found. + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the HelmChart's `.status.observedGeneration`. The observed generation is the +latest `.metadata.generation` which resulted in either a [ready state](#ready-helmchart), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. 
+ +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1/helmrepositories.md b/docs/spec/v1/helmrepositories.md new file mode 100644 index 000000000..97fdff2ec --- /dev/null +++ b/docs/spec/v1/helmrepositories.md @@ -0,0 +1,881 @@ +# Helm Repositories + + + +There are 2 [Helm repository types](#type) defined by the `HelmRepository` API: +- Helm HTTP/S repository, which defines a Source to produce an Artifact for a Helm +repository index YAML (`index.yaml`). +- OCI Helm repository, which defines a source that does not produce an Artifact. + It's a data container to store the information about the OCI repository that + can be used by [HelmChart](helmcharts.md) to access OCI Helm charts. + +## Examples + +### Helm HTTP/S repository + +The following is an example of a HelmRepository. It creates a YAML (`.yaml`) +Artifact from the fetched Helm repository index (in this example the [podinfo +repository](https://github.com/stefanprodan/podinfo)): + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: https://stefanprodan.github.io/podinfo +``` + +In the above example: + +- A HelmRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- The source-controller fetches the Helm repository index YAML every five + minutes from `https://stefanprodan.github.io/podinfo`, indicated by the + `.spec.interval` and `.spec.url` fields. 
+- The digest (algorithm defaults to SHA256) of the Helm repository index after + stable sorting the entries is used as Artifact revision, reported in-cluster + in the `.status.artifact.revision` field. +- When the current HelmRepository revision differs from the latest fetched + revision, it is stored as a new Artifact. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `helmrepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f helmrepository.yaml + ``` + +2. Run `kubectl get helmrepository` to see the HelmRepository: + + ```console + NAME URL AGE READY STATUS + podinfo https://stefanprodan.github.io/podinfo 4s True stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + ``` + +3. Run `kubectl describe helmrepository podinfo` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the HelmRepository's Status: + + ```console + ... 
+ Status: + Artifact: + Digest: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Last Update Time: 2022-02-04T09:55:58Z + Path: helmrepository/default/podinfo/index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml + Revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + Size: 40898 + URL: http://source-controller.flux-system.svc.cluster.local./helmrepository/default/podinfo/index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml + Conditions: + Last Transition Time: 2022-02-04T09:55:58Z + Message: stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2022-02-04T09:55:58Z + Message: stored artifact for revision 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.flux-system.svc.cluster.local./helmrepository/default/podinfo/index.yaml + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 1m source-controller fetched index of size 30.88kB from 'https://stefanprodan.github.io/podinfo' + ``` + +### Helm OCI repository + +The following is an example of an OCI HelmRepository. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo + namespace: default +spec: + type: "oci" + interval: 5m0s + url: oci://ghcr.io/stefanprodan/charts +``` + +In the above example: + +- A HelmRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- A HelmChart that refers to this HelmRepository uses the URL in the `.spec.url` + field to access the OCI Helm chart. 
+ +**NOTE:** The `.spec.interval` field is only used by the `default` Helm +repository and is ignored for any value in `oci` Helm repository. + +You can run this example by saving the manifest into `helmrepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f helmrepository.yaml + ``` + +2. Run `kubectl get helmrepository` to see the HelmRepository: + + ```console + NAME URL AGE READY STATUS + podinfo oci://ghcr.io/stefanprodan/charts 3m22s + ``` + +Because the OCI Helm repository is a data container, there's nothing to report +for `READY` and `STATUS` columns above. The existence of the object can be +considered to be ready for use. + +## Writing a HelmRepository spec + +As with all other Kubernetes config, a HelmRepository needs `apiVersion`, +`kind`, and `metadata` fields. The name of a HelmRepository object must be a +valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A HelmRepository also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### Type + +`.spec.type` is an optional field that specifies the Helm repository type. + +Possible values are `default` for a Helm HTTP/S repository, or `oci` for an OCI Helm repository. + +**Note:** For improved support for OCI Helm charts, please use the +[`OCIRepository`](ocirepositories.md) API. + +### Provider + +`.spec.provider` is an optional field that allows specifying an OIDC provider used +for authentication purposes. + +Supported options are: +- `generic` +- `aws` +- `azure` +- `gcp` + +The `generic` provider can be used for public repositories or when static credentials +are used for authentication. If you do not specify `.spec.provider`, it defaults +to `generic`. + +**Note**: The provider field is supported only for Helm OCI repositories. The `spec.type` +field must be set to `oci`.
+ +#### AWS + +The `aws` provider can be used to authenticate automatically using the EKS worker +node IAM role or IAM Role for Service Accounts (IRSA), and by extension gain access +to ECR. + +##### EKS Worker Node IAM Role + +When the worker node IAM role has access to ECR, source-controller running on it +will also have access to ECR. + +##### IAM Role for Service Accounts (IRSA) + +When using IRSA to enable access to ECR, add the following patch to your bootstrap +repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + eks.amazonaws.com/role-arn: + target: + kind: ServiceAccount + name: source-controller +``` + +Note that you can attach the AWS managed policy `arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly` +to the IAM role when using IRSA. + +#### Azure + +The `azure` provider can be used to authenticate automatically using Workload Identity and Kubelet Managed +Identity to gain access to ACR. + +##### Kubelet Managed Identity + +When the kubelet managed identity has access to ACR, source-controller running on +it will also have access to ACR. + +**Note:** If you have more than one identity configured on the cluster, you have to specify which one to use +by setting the `AZURE_CLIENT_ID` environment variable in the source-controller deployment. + +If you are running into further issues, please look at the +[troubleshooting guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/TROUBLESHOOTING.md#azure-virtual-machine-managed-identity). 
+ +##### Azure Workload Identity + +When using Workload Identity to enable access to ACR, add the following patch to +your bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +Ensure Workload Identity is properly set up on your cluster and the mutating webhook is installed. +Create an identity that has access to ACR. Next, establish +a federated identity between the source-controller ServiceAccount and the +identity. Patch the source-controller Deployment and ServiceAccount as shown in the patch +above. Please take a look at this [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). + +#### GCP + +The `gcp` provider can be used to authenticate automatically using OAuth scopes or +Workload Identity, and by extension gain access to GCR or Artifact Registry. + +##### Access Scopes + +When the GKE nodes have the appropriate OAuth scope for accessing GCR and Artifact Registry, +source-controller running on it will also have access to them. 
+ +##### GKE Workload Identity + +When using Workload Identity to enable access to GCR or Artifact Registry, add the +following patch to your bootstrap repository, in the `flux-system/kustomization.yaml` +file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + iam.gke.io/gcp-service-account: + target: + kind: ServiceAccount + name: source-controller +``` + +The Artifact Registry service uses the permission `artifactregistry.repositories.downloadArtifacts` +that is located under the Artifact Registry Reader role. If you are using Google Container Registry service, +the needed permission is instead `storage.objects.list` which can be bound as part +of the Container Registry Service Agent role. Take a look at [this guide](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) +for more information about setting up GKE Workload Identity. + +### Insecure + +`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) +container registry server, if set to `true`. The default value is `false`, +denying insecure non-TLS connections when fetching Helm chart OCI artifacts. + +**Note**: The insecure field is supported only for Helm OCI repositories. +The `spec.type` field must be set to `oci`. + +### Interval + +**Note:** This field is ineffectual for [OCI Helm +Repositories](#helm-oci-repository). + +`.spec.interval` is an optional field that specifies the interval which the +Helm repository index must be consulted at. When not set, the default value is +`1m`. + +After successfully reconciling a HelmRepository object, the source-controller +requeues the object for inspection after the specified interval. The value +must be in a [Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g.
`10m0s` to fetch the HelmRepository index YAML every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. applying a +change to the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple HelmRepository objects +are set up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### URL + +`.spec.url` is a required field that depending on the [type of the HelmRepository object](#type) +specifies the HTTP/S or OCI address of a Helm repository. + +For OCI, the URL is expected to point to a registry repository, e.g. `oci://ghcr.io/fluxcd/source-controller`. + +For Helm repositories which require authentication, see [Secret reference](#secret-reference). + +### Timeout + +**Note:** This field is not applicable to [OCI Helm +Repositories](#helm-oci-repository). + +`.spec.timeout` is an optional field to specify a timeout for the fetch +operation. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. When not set, the +default value is `1m`. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the HelmRepository, containing authentication +credentials for the repository. + +#### Basic access authentication + +To authenticate towards a Helm repository using basic access authentication +(in other words: using a username and password), the referenced Secret is +expected to contain `.data.username` and `.data.password` values. 
+ +For example: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: example + namespace: default +spec: + interval: 5m0s + url: https://example.com + secretRef: + name: example-user +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-user + namespace: default +stringData: + username: "user-123456" + password: "pass-123456" +``` + +OCI Helm repository example: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/my-user/my-private-repo + type: "oci" + secretRef: + name: oci-creds +--- +apiVersion: v1 +kind: Secret +metadata: + name: oci-creds + namespace: default +stringData: + username: "user-123456" + password: "pass-123456" +``` + +For OCI Helm repositories, Kubernetes secrets of type [kubernetes.io/dockerconfigjson](https://kubernetes.io/docs/concepts/configuration/secret/#secret-types) are also supported. +It is possible to create one such secret with `kubectl create secret docker-registry` +or using the Flux CLI: + +```yaml +flux create secret oci ghcr-auth \ + --url=ghcr.io \ + --username=flux \ + --password=${GITHUB_PAT} +``` + +**Warning:** Support for specifying TLS authentication data using this API has been +deprecated. Please use [`.spec.certSecretRef`](#cert-secret-reference) instead. +If the controller uses the secret specified by this field to configure TLS, then +a deprecation warning will be logged. + +### Mutual TLS Authentication + +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data for mutual TLS authentication. + +To authenticate towards a Helm repository using mutual TLS, +the referenced Secret's `.data` should contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. 
+specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. + +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: example + namespace: default +spec: + interval: 5m0s + url: https://example.com + certSecretRef: + name: example-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-tls + namespace: default +type: kubernetes.io/tls # or Opaque +data: + tls.crt: + tls.key: + # NOTE: Can be supplied without the above values + ca.crt: +``` + +### Pass credentials + +`.spec.passCredentials` is an optional field to allow the credentials from the +[Secret reference](#secret-reference) to be passed on to a host that does not +match the host as defined in URL. This may for example be required if the host +advertised chart URLs in the index differ from the specified URL. + +Enabling this should be done with caution, as it can potentially result in +credentials getting stolen in a man-in-the-middle attack. This feature only applies +to HTTP/S Helm repositories. + +### Suspend + +**Note:** This field is not applicable to [OCI Helm +Repositories](#helm-oci-repository). + +`.spec.suspend` is an optional field to suspend the reconciliation of a +HelmRepository. 
When set to `true`, the controller will stop reconciling the
+HelmRepository, and changes to the resource or the Helm repository index will
+not result in a new Artifact. When the field is set to `false` or removed, it
+will resume.
+
+For practical information, see
+[suspending and resuming](#suspending-and-resuming).
+
+## Working with HelmRepositories
+
+**Note:** This section does not apply to [OCI Helm
+Repositories](#helm-oci-repository), being a data container, once created, they
+are ready to be used by [HelmCharts](helmcharts.md).
+
+### Triggering a reconcile
+
+To manually tell the source-controller to reconcile a HelmRepository outside the
+[specified interval window](#interval), a HelmRepository can be annotated with
+`reconcile.fluxcd.io/requestedAt: <arbitrary value>`. Annotating the resource
+queues the object for reconciliation if the `<arbitrary value>` differs from
+the last value the controller acted on, as reported in
+[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).
+
+Using `kubectl`:
+
+```sh
+kubectl annotate --field-manager=flux-client-side-apply --overwrite helmrepository/<repository-name> reconcile.fluxcd.io/requestedAt="$(date +%s)"
+```
+
+Using `flux`:
+
+```sh
+flux reconcile source helm <repository-name>
+```
+
+### Waiting for `Ready`
+
+When a change is applied, it is possible to wait for the HelmRepository to
+reach a [ready state](#ready-helmrepository) using `kubectl`:
+
+```sh
+kubectl wait helmrepository/<repository-name> --for=condition=ready --timeout=1m
+```
+
+### Suspending and resuming
+
+When you find yourself in a situation where you temporarily want to pause the
+reconciliation of a HelmRepository, you can suspend it using the
+[`.spec.suspend` field](#suspend).
+ +#### Suspend a HelmRepository + +In your YAML declaration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: +spec: + suspend: true +``` + +Using `kubectl`: + +```sh +kubectl patch helmrepository --field-manager=flux-client-side-apply -p '{\"spec\": {\"suspend\" : true }}' +``` + +Using `flux`: + +```sh +flux suspend source helm +``` + +**Note:** When a HelmRepository has an Artifact and is suspended, and this +Artifact later disappears from the storage due to e.g. the source-controller +Pod being evicted from a Node, this will not be reflected in the +HelmRepository's Status until it is resumed. + +#### Resume a HelmRepository + +In your YAML declaration, comment out (or remove) the field: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: +spec: + # suspend: true +``` + +**Note:** Setting the field value to `false` has the same effect as removing +it, but does not allow for "hot patching" using e.g. `kubectl` while practicing +GitOps; as the manually applied patch would be overwritten by the declared +state in Git. + +Using `kubectl`: + +```sh +kubectl patch helmrepository --field-manager=flux-client-side-apply -p '{\"spec\" : {\"suspend\" : false }}' +``` + +Using `flux`: + +```sh +flux resume source helm +``` + +### Debugging a HelmRepository + +**Note:** This section does not apply to [OCI Helm +Repositories](#helm-oci-repository), being a data container, they are static +objects that don't require debugging if valid. + +There are several ways to gather information about a HelmRepository for debugging +purposes. + +#### Describe the HelmRepository + +Describing a HelmRepository using `kubectl describe helmrepository ` +displays the latest recorded information for the resource in the `Status` and +`Events` sections: + +```console +... +Status: +... 
+  Conditions:
+    Last Transition Time:  2022-02-04T13:41:56Z
+    Message:               failed to construct Helm client: scheme "invalid" not supported
+    Observed Generation:   2
+    Reason:                Failed
+    Status:                True
+    Type:                  Stalled
+    Last Transition Time:  2022-02-04T13:41:56Z
+    Message:               failed to construct Helm client: scheme "invalid" not supported
+    Observed Generation:   2
+    Reason:                Failed
+    Status:                False
+    Type:                  Ready
+    Last Transition Time:  2022-02-04T13:41:56Z
+    Message:               failed to construct Helm client: scheme "invalid" not supported
+    Observed Generation:   2
+    Reason:                Failed
+    Status:                True
+    Type:                  FetchFailed
+  Observed Generation:     2
+  URL:                     http://source-controller.source-system.svc.cluster.local./helmrepository/default/podinfo/index.yaml
+Events:
+  Type     Reason  Age  From               Message
+  ----     ------  ---  ----               -------
+  Warning  Failed  6s   source-controller  failed to construct Helm client: scheme "invalid" not supported
+```
+
+#### Trace emitted Events
+
+To view events for specific HelmRepository(s), `kubectl events` can be used in
+combination with `--for` to list the Events for specific objects. For example,
+running
+
+```sh
+kubectl events --for HelmRepository/<repository-name>
+```
+
+lists
+
+```console
+LAST SEEN   TYPE     REASON            OBJECT                             MESSAGE
+107s        Warning  Failed            helmrepository/<repository-name>   failed to construct Helm client: scheme "invalid" not supported
+7s          Normal   NewArtifact       helmrepository/<repository-name>   fetched index of size 30.88kB from 'https://stefanprodan.github.io/podinfo'
+3s          Normal   ArtifactUpToDate  helmrepository/<repository-name>   artifact up-to-date with remote revision: 'sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111'
+```
+
+Besides being reported in Events, the reconciliation errors are also logged by
+the controller. The Flux CLI offers commands for filtering the logs for a
+specific HelmRepository, e.g. `flux logs --level=error --kind=HelmRepository --name=<repository-name>`.
+ +## HelmRepository Status + +**Note:** This section does not apply to [OCI Helm +Repositories](#helm-oci-repository), they do not contain any information in the +status. + +### Artifact + +The HelmRepository reports the last fetched repository index as an Artifact +object in the `.status.artifact` of the resource. + +The Artifact file is an exact copy of the Helm repository index YAML +(`index-.yaml`) as fetched, and can be retrieved in-cluster from the +`.status.artifact.url` HTTP address. + +#### Artifact example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: HelmRepository +metadata: + name: +status: + artifact: + digest: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + lastUpdateTime: "2022-02-04T09:55:58Z" + path: helmrepository///index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml + revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111 + size: 40898 + url: http://source-controller.flux-system.svc.cluster.local./helmrepository///index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml +``` + +### Conditions + +A HelmRepository enters various states during its lifecycle, reflected as [Kubernetes +Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-helmrepository) while fetching the +repository index, it can be [ready](#ready-helmrepository), it can +[fail during reconciliation](#failed-helmrepository), or it can +[stall](#stalled-helmrepository). + +The HelmRepository API is compatible with the [kstatus +specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the HelmRepository to become +`Ready`. 
+ +#### Reconciling HelmRepository + +The source-controller marks a HelmRepository as _reconciling_ when one of the following +is true: + +- There is no current Artifact for the HelmRepository, or the reported Artifact + is determined to have disappeared from the storage. +- The generation of the HelmRepository is newer than the [Observed + Generation](#observed-generation). +- The newly fetched Artifact revision differs from the current Artifact. + +When the HelmRepository is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the HelmRepository's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, it adds an additional +Condition with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the HelmRepository while their status value is `"True"`. + +#### Ready HelmRepository + +The source-controller marks a HelmRepository as _ready_ when it has the following +characteristics: + +- The HelmRepository reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to fetch the Helm repository index using the current + spec. +- The revision of the reported Artifact is up-to-date with the latest + revision of the Helm repository. + +When the HelmRepository is "ready", the controller sets a Condition with the following +attributes in the HelmRepository's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +HelmRepository is marked as [reconciling](#reconciling-helmrepository), or e.g. 
+a [transient error](#failed-helmrepository) occurs due to a temporary network +issue. + +When the HelmRepository Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +HelmRepository's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed HelmRepository + +The source-controller may get stuck trying to produce an Artifact for a +HelmRepository without completing. This can occur due to some of the following +factors: + +- The Helm repository [URL](#url) is temporarily unavailable. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- The credentials in the referenced Secret are invalid. +- The HelmRepository spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the HelmRepository's +`.status.conditions`: + +- `type: FetchFailed` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: IndexationFailed` | `reason: Failed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the HelmRepository while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +While the HelmRepository has this Condition, the controller will continue to +attempt to produce an Artifact for the resource with an exponential backoff, +until it succeeds and the HelmRepository is marked as [ready](#ready-helmrepository). 
+ +Note that a HelmRepository can be [reconciling](#reconciling-helmrepository) +while failing at the same time, for example due to a newly introduced +configuration issue in the HelmRepository spec. When a reconciliation fails, the +`Reconciling` Condition reason would be `ProgressingWithRetry`. When the +reconciliation is performed again after the failure, the reason is updated to +`Progressing`. + +#### Stalled HelmRepository + +The source-controller can mark a HelmRepository as _stalled_ when it determines +that without changes to the spec, the reconciliation can not succeed. +For example because a Helm repository URL with an unsupported protocol is +specified. + +When this happens, the controller sets the same Conditions as when it +[fails](#failed-helmrepository), but adds another Condition with the following +attributes to the HelmRepository's +`.status.conditions`: + +- `type: Stalled` +- `status: "True"` +- `reason: URLInvalid` + +While the HelmRepository has this Condition, the controller will not requeue +the resource any further, and will stop reconciling the resource until a change +to the spec is made. + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the HelmRepository's `.status.observedGeneration`. The observed generation is +the latest `.metadata.generation` which resulted in either a [ready state](#ready-helmrepository), +or stalled due to error it can not recover from without human intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). 
+ +[pem-encoding]: https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus diff --git a/docs/spec/v1/ocirepositories.md b/docs/spec/v1/ocirepositories.md new file mode 100644 index 000000000..d2bfa399e --- /dev/null +++ b/docs/spec/v1/ocirepositories.md @@ -0,0 +1,1147 @@ +# OCI Repositories + + + +The `OCIRepository` API defines a Source to produce an Artifact for an OCI +repository. + +## Example + +The following is an example of an OCIRepository. It creates a tarball +(`.tar.gz`) Artifact with the fetched data from an OCI repository for the +resolved digest. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + tag: latest +``` + +In the above example: + +- An OCIRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- The source-controller checks the OCI repository every five minutes, indicated + by the `.spec.interval` field. +- It pulls the `latest` tag of the `ghcr.io/stefanprodan/manifests/podinfo` + repository, indicated by the `.spec.ref.tag` and `.spec.url` fields. +- The resolved tag and SHA256 digest is used as the Artifact + revision, reported in-cluster in the `.status.artifact.revision` field. +- When the current OCIRepository digest differs from the latest fetched + digest, a new Artifact is archived. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `ocirepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f ocirepository.yaml + ``` + +2. 
Run `kubectl get ocirepository` to see the OCIRepository: + + ```console + NAME URL AGE READY STATUS + podinfo oci://ghcr.io/stefanprodan/manifests/podinfo 5s True stored artifact with revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + ``` + +3. Run `kubectl describe ocirepository podinfo` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the OCIRepository's Status: + + ```console + ... + Status: + Artifact: + Digest: sha256:d7e924b4882e55b97627355c7b3d2e711e9b54303afa2f50c25377f4df66a83b + Last Update Time: 2025-06-14T11:23:36Z + Path: ocirepository/default/podinfo/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de.tar.gz + Revision: latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de + Size: 1105 + URL: http://source-controller.flux-system.svc.cluster.local./ocirepository/oci/podinfo/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de.tar.gz + Conditions: + Last Transition Time: 2025-06-14T11:23:36Z + Message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2025-06-14T11:23:36Z + Message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./gitrepository/default/podinfo/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 62s source-controller stored artifact with revision 'latest/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' from 'oci://ghcr.io/stefanprodan/manifests/podinfo' + ``` + +## Writing an OCIRepository spec + +As with all other Kubernetes config, an OCIRepository needs `apiVersion`, 
+`kind`, and `metadata` fields. The name of an OCIRepository object must be a
+valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
+
+An OCIRepository also needs a
+[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status).
+
+### URL
+
+`.spec.url` is a required field that specifies the address of the
+container image repository in the format `oci://<host>:<port>/<org-name>/<repository-name>`.
+
+**Note:** Specifying a tag or digest is not acceptable for this field.
+
+### Provider
+
+`.spec.provider` is an optional field that allows specifying an OIDC provider used for
+authentication purposes.
+
+Supported options are:
+
+- `generic`
+- `aws`
+- `azure`
+- `gcp`
+
+The `generic` provider can be used for public repositories or when
+static credentials are used for authentication, either with
+`spec.secretRef` or `spec.serviceAccountName`.
+If you do not specify `.spec.provider`, it defaults to `generic`.
+
+For a complete guide on how to set up authentication for cloud providers,
+see the integration [docs](/flux/integrations/).
+
+#### AWS
+
+The `aws` provider can be used to authenticate automatically using the EKS
+worker node IAM role or IAM Role for Service Accounts (IRSA), and by extension
+gain access to ECR.
+
+When the worker node IAM role has access to ECR, source-controller running on it
+will also have access to ECR.
+ +When using IRSA to enable access to ECR, add the following patch to your +bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + eks.amazonaws.com/role-arn: + target: + kind: ServiceAccount + name: source-controller +``` + +Note that you can attach the AWS managed policy `arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly` +to the IAM role when using IRSA. + +#### Azure + +The `azure` provider can be used to authenticate automatically using Workload Identity and Kubelet Managed +Identity to gain access to ACR. + +##### Kubelet Managed Identity + +When the kubelet managed identity has access to ACR, source-controller running +on it will also have access to ACR. + +**Note:** If you have more than one identity configured on the cluster, you have to specify which one to use +by setting the `AZURE_CLIENT_ID` environment variable in the source-controller deployment. + +If you are running into further issues, please look at the +[troubleshooting guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/TROUBLESHOOTING.md#azure-virtual-machine-managed-identity). 
+ +##### Workload Identity + +When using Workload Identity to enable access to ACR, add the following patch to +your bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +Ensure Workload Identity is properly set up on your cluster and the mutating webhook is installed. +Create an identity that has access to ACR. Next, establish +a federated identity between the source-controller ServiceAccount and the +identity. Patch the source-controller Deployment and ServiceAccount as shown in the patch +above. Please take a look at this [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). + +#### GCP + +The `gcp` provider can be used to authenticate automatically using OAuth scopes +or Workload Identity, and by extension gain access to GCR or Artifact Registry. + +When the GKE nodes have the appropriate OAuth scope for accessing GCR and +Artifact Registry, source-controller running on it will also have access to them. 
+ +When using Workload Identity to enable access to GCR or Artifact Registry, add +the following patch to your bootstrap repository, in the +`flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + iam.gke.io/gcp-service-account: + target: + kind: ServiceAccount + name: source-controller +``` + +The Artifact Registry service uses the permission `artifactregistry.repositories.downloadArtifacts` +that is located under the Artifact Registry Reader role. If you are using +Google Container Registry service, the needed permission is instead `storage.objects.list` +which can be bound as part of the Container Registry Service Agent role. +Take a look at [this guide](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) +for more information about setting up GKE Workload Identity. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the OCIRepository, containing authentication +credentials for the OCI repository. + +This secret is expected to be in the same format as [`imagePullSecrets`][image-pull-secrets]. +The usual way to create such a secret is with: + +```sh +kubectl create secret docker-registry ... +``` + +### Service Account reference + +`.spec.serviceAccountName` is an optional field to specify a Service Account +in the same namespace as OCIRepository with purpose depending on the value of +the `.spec.provider` field: + +- When `.spec.provider` is set to `generic`, the controller will fetch the image + pull secrets attached to the Service Account and use them for authentication. +- When `.spec.provider` is set to `aws`, `azure`, or `gcp`, the Service Account + will be used for Workload Identity authentication. 
In this case, the controller + feature gate `ObjectLevelWorkloadIdentity` must be enabled, otherwise the + controller will error out. + +**Note:** that for a publicly accessible image repository, you don't need to +provide a `secretRef` nor `serviceAccountName`. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +### Mutual TLS Authentication + +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data for mutual TLS authentication. + +To authenticate towards an OCI repository using mutual TLS, +the referenced Secret's `.data` should contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. +specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. + +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. 
Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: example + namespace: default +spec: + interval: 5m0s + url: oci://example.com + certSecretRef: + name: example-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-tls + namespace: default +type: kubernetes.io/tls # or Opaque +data: + tls.crt: + tls.key: + # NOTE: Can be supplied without the above values + ca.crt: +``` + +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all the remote operations related to the OCIRepository. +The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. + +Example: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. 
+ +**Warning:** [Cosign](https://github.com/sigstore/cosign) *keyless* +[verification](#verification) is not supported for this API. If you +require cosign keyless verification to use a proxy you must use the +standard environment variables mentioned above. If you specify a +`proxySecretRef` the controller will simply send out the requests +needed for keyless verification without the associated object-level +proxy settings. + +### Insecure + +`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) +container registry server, if set to `true`. The default value is `false`, +denying insecure (HTTP) connections. + +### Interval + +`.spec.interval` is a required field that specifies the interval at which the +OCI repository must be fetched. + +After successfully reconciling the object, the source-controller requeues it +for inspection after the specified interval. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to reconcile the object every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. a change to +the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple OCIRepository objects are +set up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### Timeout + +`.spec.timeout` is an optional field to specify a timeout for OCI operations +like pulling. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. The default value +is `60s`. + +### Reference + +`.spec.ref` is an optional field to specify the OCI reference to resolve and +watch for changes. 
References are specified in one or more subfields +(`.tag`, `.semver`, `.digest`), with latter listed fields taking +precedence over earlier ones. If not specified, it defaults to the `latest` +tag. + +#### Tag example + +To pull a specific tag, use `.spec.ref.tag`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + ref: + tag: "" +``` + +#### SemVer example + +To pull a tag based on a +[SemVer range](https://github.com/Masterminds/semver#checking-version-constraints), +use `.spec.ref.semver`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + ref: + # SemVer range reference: https://github.com/Masterminds/semver#checking-version-constraints + semver: "" +``` + +This field takes precedence over [`.tag`](#tag-example). + +#### SemverFilter example + +`.spec.ref.semverFilter` is an optional field to specify a SemVer filter to apply +when fetching tags from the OCI repository. The filter is a regular expression +that is applied to the tags fetched from the repository. Only tags that match +the filter are considered for the semver range resolution. + +**Note:** The filter is only taken into account when the `.spec.ref.semver` field +is set. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + # SemVer comparisons using constraints without a prerelease comparator will skip prerelease versions. + # Adding a `-0` suffix to the semver range will include prerelease versions. + semver: ">= 6.1.x-0" + semverFilter: ".*-rc.*" +``` + +In the above example, the controller fetches tags from the `ghcr.io/stefanprodan/manifests/podinfo` +repository and filters them using the regular expression `.*-rc.*`. Only tags that +contain the `-rc` suffix are considered for the semver range resolution. 
+ +#### Digest example + +To pull a specific digest, use `.spec.ref.digest`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + ref: + digest: "sha256:" +``` + +This field takes precedence over all other fields. + +### Layer selector + +`spec.layerSelector` is an optional field to specify which layer should be extracted from the OCI Artifact. +If not specified, the controller will extract the first layer found in the artifact. + +To extract a layer matching a specific +[OCI media type](https://github.com/opencontainers/image-spec/blob/v1.0.2/media-types.md): + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + layerSelector: + mediaType: "application/vnd.cncf.helm.chart.content.v1.tar+gzip" + operation: extract # can be 'extract' or 'copy', defaults to 'extract' +``` + +If the layer selector matches more than one layer, the first layer matching the specified media type will be used. +Note that the selected OCI layer must be +[compressed](https://github.com/opencontainers/image-spec/blob/v1.0.2/layer.md#gzip-media-types) +in the `tar+gzip` format. + +When `.spec.layerSelector.operation` is set to `copy`, instead of extracting the +compressed layer, the controller copies the tarball as-is to storage, thus +keeping the original content unaltered. + +### Ignore + +`.spec.ignore` is an optional field to specify rules in [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Paths +matching the defined rules are excluded while archiving. + +When specified, `.spec.ignore` overrides the [default exclusion +list](#default-exclusions), and may overrule the [`.sourceignore` file +exclusions](#sourceignore-file). See [excluding files](#excluding-files) +for more information. 
+ +### Verification + +`.spec.verify` is an optional field to enable the verification of [Cosign](https://github.com/sigstore/cosign) +or [Notation](https://github.com/notaryproject/notation) +signatures. The field offers three subfields: + +- `.provider`, to specify the verification provider. The supported options are `cosign` and `notation` at present. +- `.secretRef.name`, to specify a reference to a Secret in the same namespace as + the OCIRepository, containing the Cosign public keys of trusted authors. For Notation this Secret should also + include the [trust policy](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/trust-store-trust-policy.md#trust-policy) in + addition to the CA certificate. +- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers (only supported when using `cosign` as the + verification provider). Please see + [Keyless verification](#keyless-verification) for more details. + +#### Cosign + +The `cosign` provider can be used to verify the signature of an OCI artifact using either a known public key +or via the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + verify: + provider: cosign + secretRef: + name: cosign-public-keys +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the OCIRepository's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +##### Public keys verification + +To verify the authenticity of an OCI artifact, create a Kubernetes secret +with the Cosign public keys: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: cosign-public-keys +type: Opaque +data: + key1.pub: + key2.pub: +``` + +Note that the keys must have the `.pub` extension for Flux to make use of them. 
+ +Flux will loop over the public keys and use them to verify an artifact's signature. +This allows for older artifacts to be valid as long as the right key is in the secret. + +##### Keyless verification + +For publicly available OCI artifacts, which are signed using the +[Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure, +you can enable the verification by omitting the `.verify.secretRef` field. + +To verify the identity's subject and the OIDC issuer present in the Fulcio +certificate, you can specify a list of OIDC identity matchers using +`.spec.verify.matchOIDCIdentity`. The matcher provides two required fields: + +- `.issuer`, to specify a regexp that matches against the OIDC issuer. +- `.subject`, to specify a regexp that matches against the subject identity in + the certificate. +Both values should follow the [Go regular expression syntax](https://golang.org/s/re2syntax). + +The matchers are evaluated in an OR fashion, i.e. the identity is deemed to be +verified if any one matcher successfully matches against the identity. + +Example of verifying artifacts signed by the +[Cosign GitHub Action](https://github.com/sigstore/cosign-installer) with GitHub OIDC Token: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo +spec: + interval: 5m + url: oci://ghcr.io/stefanprodan/manifests/podinfo + verify: + provider: cosign + matchOIDCIdentity: + - issuer: "^https://token.actions.githubusercontent.com$" + subject: "^https://github.com/stefanprodan/podinfo.*$" +``` + +The controller verifies the signatures using the Fulcio root CA and the Rekor +instance hosted at [rekor.sigstore.dev](https://rekor.sigstore.dev/). + +Note that keyless verification is an **experimental feature**, using +custom root CAs or self-hosted Rekor instances are not currently supported. 
+
+#### Notation
+
+The `notation` provider can be used to verify the signature of an OCI artifact using known
+trust policy and CA certificate.
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+  name: 
+spec:
+  verify:
+    provider: notation
+    secretRef:
+      name: notation-config
+```
+
+When the verification succeeds, the controller adds a Condition with the
+following attributes to the OCIRepository's `.status.conditions`:
+
+- `type: SourceVerified`
+- `status: "True"`
+- `reason: Succeeded`
+
+To verify the authenticity of an OCI artifact, create a Kubernetes secret
+containing Certificate Authority (CA) root certificates and a `trust policy`:
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: notation-config
+type: Opaque
+data:
+  certificate1.pem: 
+  certificate2.crt: 
+  trustpolicy.json: 
+```
+
+Note that the CA certificates must have either `.pem` or `.crt` extension and your trust policy must
+be named `trustpolicy.json` for Flux to make use of them.
+
+For more information on the signing and verification process see [Signing and Verification Workflow](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/signing-and-verification-workflow.md).
+
+Flux will loop over the certificates and use them to verify an artifact's signature.
+This allows for older artifacts to be valid as long as the right certificate is in the secret.
+
+### Suspend
+
+`.spec.suspend` is an optional field to suspend the reconciliation of an
+OCIRepository. When set to `true`, the controller will stop reconciling the
+OCIRepository, and changes to the resource or in the OCI repository will not
+result in a new Artifact. When the field is set to `false` or removed, it will
+resume.
+
+## Working with OCIRepositories
+
+### Excluding files
+
+By default, files which match the [default exclusion rules](#default-exclusions)
+are excluded while archiving the OCI repository contents as an Artifact.
+It is possible to overwrite and/or overrule the default exclusions using
+the [`.spec.ignore` field](#ignore).
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+  name: 
+spec:
+  ignore: |
+    # exclude all
+    /*
+    # include deploy dir
+    !/deploy
+    # exclude file extensions from deploy dir
+    /deploy/**/*.md
+    /deploy/**/*.txt
+```
+
+#### `.sourceignore` file
+
+Excluding files is possible by adding a `.sourceignore` file in the artifact.
+The `.sourceignore` file follows [the `.gitignore` pattern
+format](https://git-scm.com/docs/gitignore#_pattern_format), and pattern
+entries may overrule [default exclusions](#default-exclusions).
+
+The controller recursively loads ignore files so a `.sourceignore` can be
+placed in the artifact root or in subdirectories.
+
+### Triggering a reconcile
+
+To manually tell the source-controller to reconcile an OCIRepository outside the
+[specified interval window](#interval), an OCIRepository can be annotated with
+`reconcile.fluxcd.io/requestedAt: `. Annotating the resource
+queues the OCIRepository for reconciliation if the `` differs
+from the last value the controller acted on, as reported in
+[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).
+
+Using `kubectl`:
+
+```sh
+kubectl annotate --field-manager=flux-client-side-apply --overwrite ocirepository/ reconcile.fluxcd.io/requestedAt="$(date +%s)"
+```
+
+Using `flux`:
+
+```sh
+flux reconcile source oci 
+```
+
+### Waiting for `Ready`
+
+When a change is applied, it is possible to wait for the OCIRepository to reach
+a [ready state](#ready-ocirepository) using `kubectl`:
+
+```sh
+kubectl wait ocirepository/ --for=condition=ready --timeout=1m
+```
+
+### Suspending and resuming
+
+When you find yourself in a situation where you temporarily want to pause the
+reconciliation of an OCIRepository, you can suspend it using the
+[`.spec.suspend` field](#suspend).
+ +#### Suspend an OCIRepository + +In your YAML declaration: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + suspend: true +``` + +Using `kubectl`: + +```sh +kubectl patch ocirepository --field-manager=flux-client-side-apply -p '{\"spec\": {\"suspend\" : true }}' +``` + +Using `flux`: + +```sh +flux suspend source oci +``` + +**Note:** When an OCIRepository has an Artifact and it is suspended, and this +Artifact later disappears from the storage due to e.g. the source-controller +Pod being evicted from a Node, this will not be reflected in the +OCIRepository's Status until it is resumed. + +#### Resume an OCIRepository + +In your YAML declaration, comment out (or remove) the field: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + # suspend: true +``` + +**Note:** Setting the field value to `false` has the same effect as removing +it, but does not allow for "hot patching" using e.g. `kubectl` while practicing +GitOps; as the manually applied patch would be overwritten by the declared +state in Git. + +Using `kubectl`: + +```sh +kubectl patch ocirepository --field-manager=flux-client-side-apply -p '{\"spec\" : {\"suspend\" : false }}' +``` + +Using `flux`: + +```sh +flux resume source oci +``` + +### Debugging an OCIRepository + +There are several ways to gather information about a OCIRepository for +debugging purposes. + +#### Describe the OCIRepository + +Describing an OCIRepository using +`kubectl describe ocirepository ` +displays the latest recorded information for the resource in the `Status` and +`Events` sections: + +```console +... +Status: +... 
+ Conditions: + Last Transition Time: 2025-02-14T09:40:27Z + Message: processing object: new generation 1 -> 2 + Observed Generation: 2 + Reason: ProgressingWithRetry + Status: True + Type: Reconciling + Last Transition Time: 2025-02-14T09:40:27Z + Message: failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" + Observed Generation: 2 + Reason: OCIOperationFailed + Status: False + Type: Ready + Last Transition Time: 2025-02-14T09:40:27Z + Message: failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" + Observed Generation: 2 + Reason: OCIOperationFailed + Status: True + Type: FetchFailed + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./ocirepository/default/podinfo/latest.tar.gz +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning OCIOperationFailed 2s (x9 over 4s) source-controller failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" +``` + +#### Trace emitted Events + +To view events for specific OCIRepository(s), `kubectl events` can be used +in combination with `--for` to list the Events for specific objects. For +example, running + +```sh +kubectl events --for OCIRepository/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +2m14s Normal NewArtifact ocirepository/ stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' +36s Normal ArtifactUpToDate ocirepository/ artifact up-to-date with remote revision: 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' +94s Warning OCIOperationFailed ocirepository/ failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. 
The Flux CLI offers commands for filtering the logs for a
+specific OCIRepository, e.g.
+`flux logs --level=error --kind=OCIRepository --name=`.
+
+## OCIRepository Status
+
+### Artifact
+
+The OCIRepository reports the latest synchronized state from the OCI repository
+as an Artifact object in the `.status.artifact` of the resource.
+
+The `.status.artifact.revision` holds the tag and SHA256 digest of the upstream OCI artifact.
+
+The `.status.artifact.metadata` holds the upstream OCI artifact metadata such as the
+[OpenContainers standard annotations](https://github.com/opencontainers/image-spec/blob/main/annotations.md).
+If the OCI artifact was created with `flux push artifact`, then the `metadata` will contain the following
+annotations:
+- `org.opencontainers.image.created` the date and time on which the artifact was built
+- `org.opencontainers.image.source` the URL of the Git repository containing the source files
+- `org.opencontainers.image.revision` the Git branch and commit SHA1 of the source files
+
+The Artifact file is a gzip compressed TAR archive (`.tar.gz`), and
+can be retrieved in-cluster from the `.status.artifact.url` HTTP address.
+ +#### Artifact example + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +status: + artifact: + digest: sha256:9f3bc0f341d4ecf2bab460cc59320a2a9ea292f01d7b96e32740a9abfd341088 + lastUpdateTime: "2025-08-08T09:35:45Z" + metadata: + org.opencontainers.image.created: "2025-08-08T12:31:41+03:00" + org.opencontainers.image.revision: 6.1.8/b3b00fe35424a45d373bf4c7214178bc36fd7872 + org.opencontainers.image.source: https://github.com/stefanprodan/podinfo.git + path: ocirepository///.tar.gz + revision: @ + size: 1105 + url: http://source-controller..svc.cluster.local./ocirepository///.tar.gz +``` + +#### Default exclusions + +The following files and extensions are excluded from the Artifact by +default: + +- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`) +- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`) +- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`) +- CLI configs (`.goreleaser.yml, .sops.yaml`) +- Flux v1 config (`.flux.yaml`) + +To define your own exclusion rules, see [excluding files](#excluding-files). + +### Conditions + +OCIRepository has various states during its lifecycle, reflected as +[Kubernetes Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-ocirepository) while fetching the remote +state, it can be [ready](#ready-ocirepository), or it can [fail during +reconciliation](#failed-ocirepository). + +The OCIRepository API is compatible with the [kstatus specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the OCIRepository to +become `Ready`. 
+ +#### Reconciling OCIRepository + +The source-controller marks an OCIRepository as _reconciling_ when one of the +following is true: + +- There is no current Artifact for the OCIRepository, or the reported Artifact + is determined to have disappeared from the storage. +- The generation of the OCIRepository is newer than the [Observed + Generation](#observed-generation). +- The newly resolved Artifact digest differs from the current Artifact. + +When the OCIRepository is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the OCIRepository's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, an additional Condition is +added with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the OCIRepository while their status value is `"True"`. + +#### Ready OCIRepository + +The source-controller marks an OCIRepository as _ready_ when it has the +following characteristics: + +- The OCIRepository reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to communicate with the remote OCI repository using + the current spec. +- The digest of the reported Artifact is up-to-date with the latest + resolved digest of the remote OCI repository. + +When the OCIRepository is "ready", the controller sets a Condition with the +following attributes in the OCIRepository's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the +OCIRepository is marked as [reconciling](#reconciling-ocirepository), or e.g. 
a
+[transient error](#failed-ocirepository) occurs due to a temporary network issue.
+
+When the OCIRepository Artifact is archived in the controller's Artifact
+storage, the controller sets a Condition with the following attributes in the
+OCIRepository's `.status.conditions`:
+
+- `type: ArtifactInStorage`
+- `status: "True"`
+- `reason: Succeeded`
+
+This `ArtifactInStorage` Condition will retain a status value of `"True"` until
+the Artifact in the storage no longer exists.
+
+#### Failed OCIRepository
+
+The source-controller may get stuck trying to produce an Artifact for an
+OCIRepository without completing. This can occur due to some of the following
+factors:
+
+- The remote OCI repository [URL](#url) is temporarily unavailable.
+- The OCI repository does not exist.
+- The [Secret reference](#secret-reference) contains a reference to a
+  non-existing Secret.
+- The credentials in the referenced Secret are invalid.
+- The OCIRepository spec contains a generic misconfiguration.
+- A storage related failure when storing the artifact.
+
+When this happens, the controller sets the `Ready` Condition status to `False`,
+and adds a Condition with the following attributes to the OCIRepository's
+`.status.conditions`:
+
+- `type: FetchFailed` | `type: IncludeUnavailable` | `type: StorageOperationFailed`
+- `status: "True"`
+- `reason: AuthenticationFailed` | `reason: OCIArtifactPullFailed` | `reason: OCIArtifactLayerOperationFailed`
+
+This condition has a ["negative polarity"][typical-status-properties],
+and is only present on the OCIRepository while the status value is `"True"`.
+There may be more arbitrary values for the `reason` field to provide an accurate
+reason for a condition.
+
+In addition to the above Condition types, the signature
+[verification](#verification) may fail.
A condition with
+the following attributes is added to the OCIRepository's `.status.conditions`:
+
+- `type: SourceVerified`
+- `status: "False"`
+- `reason: VerificationError`
+
+While the OCIRepository has one or more of these Conditions, the controller
+will continue to attempt to produce an Artifact for the resource with an
+exponential backoff, until it succeeds and the OCIRepository is marked as
+[ready](#ready-ocirepository).
+
+Note that an OCIRepository can be [reconciling](#reconciling-ocirepository)
+while failing at the same time, for example due to a newly introduced
+configuration issue in the OCIRepository spec. When a reconciliation fails, the
+`Reconciling` Condition reason would be `ProgressingWithRetry`. When the
+reconciliation is performed again after the failure, the reason is updated to
+`Progressing`.
+
+### Observed Ignore
+
+The source-controller reports an observed ignore in the OCIRepository's
+`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value
+which resulted in a [ready state](#ready-ocirepository), or stalled due to error
+it can not recover from without human intervention. The value is the same as the
+[ignore in spec](#ignore). It indicates the ignore rules used in building the
+current artifact in storage. It is also used by the controller to determine if
+an artifact needs to be rebuilt.
+
+Example:
+```yaml
+status:
+  ...
+  observedIgnore: |
+    hpa.yaml
+    build
+  ...
+```
+
+### Observed Layer Selector
+
+The source-controller reports an observed layer selector in the OCIRepository's
+`.status.observedLayerSelector`. The observed layer selector is the latest
+`.spec.layerSelector` value which resulted in a [ready state](#ready-ocirepository),
+or stalled due to error it can not recover from without human intervention.
+The value is the same as the [layer selector in spec](#layer-selector).
+It indicates the layer selection configuration used in building the current
+artifact in storage.
It is also used by the controller to determine if an +artifact needs to be rebuilt. + +Example: +```yaml +status: + ... + observedLayerSelector: + mediaType: application/vnd.cncf.helm.chart.content.v1.tar+gzip + operation: copy + ... +``` + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the OCIRepository's `.status.observedGeneration`. The observed generation is +the latest `.metadata.generation` which resulted in either a [ready state](#ready-ocirepository), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus +[image-pull-secrets]: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod +[image-auto-provider-secrets]: https://fluxcd.io/flux/guides/image-update/#imagerepository-cloud-providers-authentication +[pem-encoding]: https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail +[sops-guide]: https://fluxcd.io/flux/guides/mozilla-sops/ diff --git a/docs/spec/v1beta2/buckets.md b/docs/spec/v1beta2/buckets.md index 14d6a0d08..a78516f88 100644 --- a/docs/spec/v1beta2/buckets.md +++ b/docs/spec/v1beta2/buckets.md @@ -749,6 +749,83 @@ HTTP endpoint requires enabling [`.spec.insecure`](#insecure). Some endpoints require the specification of a [`.spec.region`](#region), see [Provider](#provider) for more (provider specific) examples. 
+### STS
+
+`.spec.sts` is an optional field for specifying the Security Token Service
+configuration. A Security Token Service (STS) is a web service that issues
+temporary security credentials. By adding this field, one may specify the
+STS endpoint from where temporary credentials will be fetched.
+
+This field is only supported for the `aws` and `generic` bucket [providers](#provider).
+
+If using `.spec.sts`, the following fields are required:
+
+- `.spec.sts.provider`, the Security Token Service provider. The only supported
+  option for the `generic` bucket provider is `ldap`. The only supported option
+  for the `aws` bucket provider is `aws`.
+- `.spec.sts.endpoint`, the HTTP/S endpoint of the Security Token Service. In
+  the case of `aws` this can be `https://sts.amazonaws.com`, or a Regional STS
+  Endpoint, or an Interface Endpoint created inside a VPC. In the case of
+  `ldap` this must be the LDAP server endpoint.
+
+When using the `ldap` provider, the following fields may also be specified:
+
+- `.spec.sts.secretRef.name`, the name of the Secret containing the LDAP
+  credentials. The Secret must contain the following keys:
+  - `username`, the username to authenticate with.
+  - `password`, the password to authenticate with.
+- `.spec.sts.certSecretRef.name`, the name of the Secret containing the
+  TLS configuration for communicating with the STS endpoint. The contents
+  of this Secret must follow the same structure as
+  [`.spec.certSecretRef.name`](#cert-secret-reference).
+
+If [`.spec.proxySecretRef.name`](#proxy-secret-reference) is specified,
+the proxy configuration will be used for communicating with the STS endpoint.
+ +Example for the `ldap` provider: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: example + namespace: example +spec: + interval: 5m + bucketName: example + provider: generic + endpoint: minio.example.com + sts: + provider: ldap + endpoint: https://ldap.example.com + secretRef: + name: ldap-credentials + certSecretRef: + name: ldap-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: ldap-credentials + namespace: example +type: Opaque +stringData: + username: + password: +--- +apiVersion: v1 +kind: Secret +metadata: + name: ldap-tls + namespace: example +type: kubernetes.io/tls # or Opaque +stringData: + tls.crt: + tls.key: + ca.crt: +``` + ### Bucket name `.spec.bucketName` is a required field that specifies which object storage @@ -763,6 +840,100 @@ See [Provider](#provider) for more (provider specific) examples. See [Provider](#provider) for more (provider specific) examples. +### Cert secret reference + +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data. The secret can contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. +specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. + +If the server is using a self-signed certificate and has TLS client +authentication enabled, all three values are required. + +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. 
Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls minio-tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +If TLS client authentication is not required, you can generate the secret with: + +```sh +flux create secret tls minio-tls --ca-crt-file=ca.crt +``` + +This API is only supported for the `generic` [provider](#provider). + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: Bucket +metadata: + name: example + namespace: example +spec: + interval: 5m + bucketName: example + provider: generic + endpoint: minio.example.com + certSecretRef: + name: minio-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-tls + namespace: example +type: kubernetes.io/tls # or Opaque +stringData: + tls.crt: + tls.key: + ca.crt: +``` + +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all the remote operations related to the Bucket. +The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. 
+ +Example: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. + ### Insecure `.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) diff --git a/docs/spec/v1beta2/helmcharts.md b/docs/spec/v1beta2/helmcharts.md index 2c06b23ef..3932a9694 100644 --- a/docs/spec/v1beta2/helmcharts.md +++ b/docs/spec/v1beta2/helmcharts.md @@ -202,6 +202,16 @@ spec: Values files also affect the generated artifact revision, see [artifact](#artifact). +### Ignore missing values files + +`.spec.ignoreMissingValuesFiles` is an optional field to specify whether missing +values files should be ignored rather than be considered errors. It defaults to +`false`. + +When `.spec.valuesFiles` and `.spec.ignoreMissingValuesFiles` are specified, +the `.status.observedValuesFiles` field is populated with the list of values +files that were found and actually contributed to the packaged chart. + ### Reconcile strategy `.spec.reconcileStrategy` is an optional field to specify what enables the @@ -252,15 +262,20 @@ For practical information, see **Note:** This feature is available only for Helm charts fetched from an OCI Registry. -`.spec.verify` is an optional field to enable the verification of [Cosign](https://github.com/sigstore/cosign) +`.spec.verify` is an optional field to enable the verification of [Cosign](https://github.com/sigstore/cosign) or [Notation](https://github.com/notaryproject/notation) signatures. The field offers three subfields: -- `.provider`, to specify the verification provider. Only supports `cosign` at present. 
+- `.provider`, to specify the verification provider. The supported options are `cosign` and `notation` at present. - `.secretRef.name`, to specify a reference to a Secret in the same namespace as - the HelmChart, containing the Cosign public keys of trusted authors. -- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers. Please see + the HelmChart, containing the public keys of trusted authors. For Notation this Secret should also include the [trust policy](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/trust-store-trust-policy.md#trust-policy) in + addition to the CA certificate. +- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers (only supported when using `cosign` as the verification provider). Please see [Keyless verification](#keyless-verification) for more details. +#### Cosign + +The `cosign` provider can be used to verify the signature of an OCI artifact using either a known public key or via the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure. + ```yaml --- apiVersion: source.toolkit.fluxcd.io/v1beta2 @@ -281,7 +296,7 @@ following attributes to the HelmChart's `.status.conditions`: - `status: "True"` - `reason: Succeeded` -#### Public keys verification +##### Public keys verification To verify the authenticity of HelmChart hosted in an OCI Registry, create a Kubernetes secret with the Cosign public keys: @@ -303,7 +318,7 @@ Note that the keys must have the `.pub` extension for Flux to make use of them. Flux will loop over the public keys and use them to verify a HelmChart's signature. This allows for older HelmCharts to be valid as long as the right key is in the secret. 
-#### Keyless verification +##### Keyless verification For publicly available HelmCharts, which are signed using the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure, @@ -362,6 +377,55 @@ instance hosted at [rekor.sigstore.dev](https://rekor.sigstore.dev/). Note that keyless verification is an **experimental feature**, using custom root CAs or self-hosted Rekor instances are not currently supported. +#### Notation + +The `notation` provider can be used to verify the signature of an OCI artifact using known +trust policy and CA certificate. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmChart +metadata: + name: podinfo +spec: + verify: + provider: notation + secretRef: + name: notation-config +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the HelmChart's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +To verify the authenticity of an OCI artifact, create a Kubernetes secret +containing Certificate Authority (CA) root certificates and the a `trust policy` + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: notation-config +type: Opaque +data: + certificate1.pem: + certificate2.crt: + trustpolicy.json: +``` + +Note that the CA certificates must have either `.pem` or `.crt` extension and your trust policy must +be named `trustpolicy.json` for Flux to make use of them. + +For more information on the signing and verification process see [Signing and Verification Workflow](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/signing-and-verification-workflow.md). + +Flux will loop over the certificates and use them to verify an artifact's signature. +This allows for older artifacts to be valid as long as the right certificate is in the secret. 
+ ## Working with HelmCharts ### Triggering a reconcile diff --git a/docs/spec/v1beta2/ocirepositories.md b/docs/spec/v1beta2/ocirepositories.md index 4ef84823c..eb5de4c5f 100644 --- a/docs/spec/v1beta2/ocirepositories.md +++ b/docs/spec/v1beta2/ocirepositories.md @@ -157,9 +157,8 @@ to the IAM role when using IRSA. #### Azure -The `azure` provider can be used to authenticate automatically using Workload Identity, Kubelet Managed -Identity or Azure Active Directory pod-managed identity (aad-pod-identity), -and by extension gain access to ACR. +The `azure` provider can be used to authenticate automatically using Workload Identity and Kubelet Managed +Identity to gain access to ACR. ##### Kubelet Managed Identity @@ -215,41 +214,6 @@ a federated identity between the source-controller ServiceAccount and the identity. Patch the source-controller Deployment and ServiceAccount as shown in the patch above. Please take a look at this [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). -##### Deprecated: AAD Pod Identity - -**Note:** The AAD Pod Identity project will be archived in [September 2023](https://github.com/Azure/aad-pod-identity#-announcement), -and you are advised to use Workload Identity instead. - -When using aad-pod-identity to enable access to ACR, add the following patch to -your bootstrap repository, in the `flux-system/kustomization.yaml` file: - -```yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - gotk-components.yaml - - gotk-sync.yaml -patches: - - patch: | - - op: add - path: /spec/template/metadata/labels/aadpodidbinding - value: - target: - kind: Deployment - name: source-controller -``` - -When using pod-managed identity on an AKS cluster, AAD Pod Identity -has to be used to give the `source-controller` pod access to the ACR. 
-To do this, you have to install `aad-pod-identity` on your cluster, create a managed identity -that has access to the container registry (this can also be the Kubelet identity -if it has `AcrPull` role assignment on the ACR), create an `AzureIdentity` and `AzureIdentityBinding` -that describe the managed identity and then label the `source-controller` deployment -with the name of the AzureIdentity as shown in the patch above. Please take a look -at [this guide](https://azure.github.io/aad-pod-identity/docs/) or -[this one](https://docs.microsoft.com/en-us/azure/aks/use-azure-ad-pod-identity) -if you want to use AKS pod-managed identities add-on that is in preview. - #### GCP The `gcp` provider can be used to authenticate automatically using OAuth scopes @@ -279,7 +243,7 @@ patches: target: kind: ServiceAccount name: source-controller -``` +``` The Artifact Registry service uses the permission `artifactregistry.repositories.downloadArtifacts` that is located under the Artifact Registry Reader role. If you are using @@ -366,6 +330,47 @@ data: deprecated. If you have any Secrets using these keys and specified in an OCIRepository, the controller will log a deprecation warning. +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all the remote operations related to the OCIRepository. +The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. 
+ +Example: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. + +**Warning:** [Cosign](https://github.com/sigstore/cosign) *keyless* +[verification](#verification) is not supported for this API. If you +require cosign keyless verification to use a proxy you must use the +standard environment variables mentioned above. If you specify a +`proxySecretRef` the controller will simply send out the requests +needed for keyless verification without the associated object-level +proxy settings. + ### Insecure `.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) @@ -441,6 +446,37 @@ spec: This field takes precedence over [`.tag`](#tag-example). +#### SemverFilter example + +`.spec.ref.semverFilter` is an optional field to specify a SemVer filter to apply +when fetching tags from the OCI repository. The filter is a regular expression +that is applied to the tags fetched from the repository. Only tags that match +the filter are considered for the semver range resolution. + +**Note:** The filter is only taken into account when the `.spec.ref.semver` field +is set. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + # SemVer comparisons using constraints without a prerelease comparator will skip prerelease versions. + # Adding a `-0` suffix to the semver range will include prerelease versions. 
+ semver: ">= 6.1.x-0" + semverFilter: ".*-rc.*" +``` + +In the above example, the controller fetches tags from the `ghcr.io/stefanprodan/manifests/podinfo` +repository and filters them using the regular expression `.*-rc.*`. Only tags that +contain `-rc` are considered for the semver range resolution. + #### Digest example To pull a specific digest, use `.spec.ref.digest`: @@ -454,7 +490,7 @@ metadata: spec: ref: digest: "sha256:" -``` +``` This field takes precedence over all other fields. @@ -501,14 +537,23 @@ for more information. ### Verification `.spec.verify` is an optional field to enable the verification of [Cosign](https://github.com/sigstore/cosign) +or [Notation](https://github.com/notaryproject/notation) signatures. The field offers three subfields: -- `.provider`, to specify the verification provider. Only supports `cosign` at present. +- `.provider`, to specify the verification provider. The supported options are `cosign` and `notation` at present. - `.secretRef.name`, to specify a reference to a Secret in the same namespace as - the OCIRepository, containing the Cosign public keys of trusted authors. -- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers. Please see + the OCIRepository, containing the Cosign public keys of trusted authors. For Notation this Secret should also + include the [trust policy](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/trust-store-trust-policy.md#trust-policy) in + addition to the CA certificate. +- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers (only supported when using `cosign` as the + verification provider). Please see [Keyless verification](#keyless-verification) for more details. 
+#### Cosign + +The `cosign` provider can be used to verify the signature of an OCI artifact using either a known public key +or via the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure. + ```yaml --- apiVersion: source.toolkit.fluxcd.io/v1beta2 @@ -529,7 +574,7 @@ following attributes to the OCIRepository's `.status.conditions`: - `status: "True"` - `reason: Succeeded` -#### Public keys verification +##### Public keys verification To verify the authenticity of an OCI artifact, create a Kubernetes secret with the Cosign public keys: @@ -551,7 +596,7 @@ Note that the keys must have the `.pub` extension for Flux to make use of them. Flux will loop over the public keys and use them to verify an artifact's signature. This allows for older artifacts to be valid as long as the right key is in the secret. -#### Keyless verification +##### Keyless verification For publicly available OCI artifacts, which are signed using the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure, @@ -593,6 +638,55 @@ instance hosted at [rekor.sigstore.dev](https://rekor.sigstore.dev/). Note that keyless verification is an **experimental feature**, using custom root CAs or self-hosted Rekor instances are not currently supported. +#### Notation + +The `notation` provider can be used to verify the signature of an OCI artifact using known +trust policy and CA certificate. 
+ +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: +spec: + verify: + provider: notation + secretRef: + name: notation-config +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the OCIRepository's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +To verify the authenticity of an OCI artifact, create a Kubernetes secret +containing Certificate Authority (CA) root certificates and a `trust policy`: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: notation-config +type: Opaque +data: + certificate1.pem: + certificate2.crt: + trustpolicy.json: +``` + +Note that the CA certificates must have either `.pem` or `.crt` extension and your trust policy must +be named `trustpolicy.json` for Flux to make use of them. + +For more information on the signing and verification process see [Signing and Verification Workflow](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/signing-and-verification-workflow.md). + +Flux will loop over the certificates and use them to verify an artifact's signature. +This allows for older artifacts to be valid as long as the right certificate is in the secret. 
+ ### Suspend `.spec.suspend` is an optional field to suspend the reconciliation of a diff --git a/go.mod b/go.mod index 2264ddf25..21c15753e 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/fluxcd/source-controller -go 1.21 +go 1.25.0 replace github.com/fluxcd/source-controller/api => ./api @@ -9,94 +9,108 @@ replace github.com/fluxcd/source-controller/api => ./api replace github.com/opencontainers/go-digest => github.com/opencontainers/go-digest v1.0.1-0.20220411205349-bde1400a84be require ( - cloud.google.com/go/storage v1.35.1 - github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 - github.com/Masterminds/semver/v3 v3.2.1 - github.com/cyphar/filepath-securejoin v0.2.4 - github.com/distribution/distribution/v3 v3.0.0-alpha.1 - github.com/docker/cli v24.0.7+incompatible + cloud.google.com/go/compute/metadata v0.8.0 + cloud.google.com/go/storage v1.56.1 + github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 + github.com/Masterminds/semver/v3 v3.4.0 + github.com/cyphar/filepath-securejoin v0.4.1 + github.com/distribution/distribution/v3 v3.0.0 + github.com/docker/cli v28.4.0+incompatible github.com/docker/go-units v0.5.0 - github.com/fluxcd/cli-utils v0.36.0-flux.3 - github.com/fluxcd/pkg/apis/event v0.7.0 - 
github.com/fluxcd/pkg/apis/meta v1.3.0 - github.com/fluxcd/pkg/git v0.17.0 - github.com/fluxcd/pkg/git/gogit v0.17.0 - github.com/fluxcd/pkg/gittestserver v0.10.0 - github.com/fluxcd/pkg/helmtestserver v0.16.0 - github.com/fluxcd/pkg/lockedfile v0.1.0 - github.com/fluxcd/pkg/masktoken v0.2.0 - github.com/fluxcd/pkg/oci v0.35.0 - github.com/fluxcd/pkg/runtime v0.44.0 - github.com/fluxcd/pkg/sourceignore v0.5.0 - github.com/fluxcd/pkg/ssh v0.11.0 - github.com/fluxcd/pkg/tar v0.4.0 - github.com/fluxcd/pkg/testserver v0.5.0 - github.com/fluxcd/pkg/version v0.2.2 - github.com/fluxcd/source-controller/api v1.2.1 - github.com/foxcpp/go-mockdns v1.0.0 - github.com/go-git/go-billy/v5 v5.5.0 - github.com/go-git/go-git/v5 v5.11.0 - github.com/go-logr/logr v1.4.1 - github.com/google/go-containerregistry v0.18.0 - github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20231202142526-55ffb0092afd + github.com/elazarl/goproxy v1.7.2 + github.com/fluxcd/cli-utils v0.36.0-flux.15 + github.com/fluxcd/pkg/apis/event v0.19.0 + github.com/fluxcd/pkg/apis/meta v1.21.0 + github.com/fluxcd/pkg/artifact v0.3.0 + github.com/fluxcd/pkg/auth v0.31.0 + github.com/fluxcd/pkg/cache v0.11.0 + github.com/fluxcd/pkg/git v0.36.0 + github.com/fluxcd/pkg/git/gogit v0.40.0 + github.com/fluxcd/pkg/gittestserver v0.20.0 + github.com/fluxcd/pkg/helmtestserver v0.30.0 + github.com/fluxcd/pkg/http/transport v0.7.0 + github.com/fluxcd/pkg/masktoken v0.8.0 + github.com/fluxcd/pkg/oci v0.56.0 + 
github.com/fluxcd/pkg/runtime v0.84.0 + github.com/fluxcd/pkg/sourceignore v0.14.0 + github.com/fluxcd/pkg/ssh v0.21.0 + github.com/fluxcd/pkg/tar v0.14.0 + github.com/fluxcd/pkg/testserver v0.13.0 + github.com/fluxcd/pkg/version v0.10.0 + github.com/fluxcd/source-controller/api v1.7.0 + github.com/foxcpp/go-mockdns v1.1.0 + github.com/go-git/go-billy/v5 v5.6.2 + github.com/go-git/go-git/v5 v5.16.2 + github.com/go-logr/logr v1.4.3 + github.com/google/go-containerregistry v0.20.6 + github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20250613215107-59a4b8593039 github.com/google/uuid v1.6.0 - github.com/minio/minio-go/v7 v7.0.66 - github.com/onsi/gomega v1.31.1 + github.com/minio/minio-go/v7 v7.0.94 + github.com/notaryproject/notation-core-go v1.3.0 + github.com/notaryproject/notation-go v1.3.2 + github.com/onsi/gomega v1.38.2 github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/go-digest/blake3 v0.0.0-20231025023718-d50d2fec9c98 - github.com/ory/dockertest/v3 v3.10.0 - github.com/otiai10/copy v1.14.0 + github.com/opencontainers/image-spec v1.1.1 + github.com/ory/dockertest/v3 v3.12.0 + github.com/otiai10/copy v1.14.1 github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 - github.com/prometheus/client_golang v1.18.0 - github.com/sigstore/cosign/v2 v2.2.3 - github.com/sigstore/sigstore v1.8.1 + github.com/prometheus/client_golang v1.23.0 + github.com/sigstore/cosign/v2 v2.5.2 + github.com/sigstore/sigstore v1.9.5 github.com/sirupsen/logrus v1.9.3 - 
github.com/spf13/pflag v1.0.5 - golang.org/x/crypto v0.18.0 - golang.org/x/sync v0.6.0 - google.golang.org/api v0.161.0 - gotest.tools v2.2.0+incompatible - helm.sh/helm/v3 v3.13.3 - k8s.io/api v0.28.6 - k8s.io/apimachinery v0.28.6 - k8s.io/client-go v0.28.6 - k8s.io/utils v0.0.0-20231127182322-b307cd553661 - sigs.k8s.io/controller-runtime v0.16.3 - sigs.k8s.io/yaml v1.4.0 + github.com/spf13/pflag v1.0.10 + golang.org/x/crypto v0.41.0 + golang.org/x/oauth2 v0.30.0 + golang.org/x/sync v0.16.0 + google.golang.org/api v0.248.0 + helm.sh/helm/v3 v3.19.0 + k8s.io/api v0.34.0 + k8s.io/apimachinery v0.34.0 + k8s.io/client-go v0.34.0 + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 + oras.land/oras-go/v2 v2.6.0 + sigs.k8s.io/controller-runtime v0.22.1 + sigs.k8s.io/yaml v1.6.0 ) require ( - cloud.google.com/go v0.111.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.5 // indirect - dario.cat/mergo v1.0.0 // indirect - filippo.io/edwards25519 v1.1.0 // indirect - github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 // indirect + cel.dev/expr v0.24.0 // indirect + cloud.google.com/go v0.121.6 // indirect + cloud.google.com/go/auth v0.16.5 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect + dario.cat/mergo v1.0.1 // indirect + github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect - github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry v0.2.3 // 
indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.29 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect - github.com/BurntSushi/toml v1.3.2 // indirect + github.com/Azure/go-autorest/autorest v0.11.30 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.7 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.1 // indirect + github.com/Azure/go-autorest/logger v0.2.2 // indirect + github.com/Azure/go-autorest/tracing v0.6.1 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect + github.com/BurntSushi/toml v1.5.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect + 
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Masterminds/squirrel v1.5.4 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/Microsoft/hcsshim v0.11.4 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect - github.com/ProtonMail/go-crypto v1.0.0 // indirect + github.com/ProtonMail/go-crypto v1.3.0 // indirect github.com/ThalesIgnite/crypto11 v1.2.5 // indirect github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect @@ -108,277 +122,308 @@ require ( github.com/alibabacloud-go/tea v1.2.1 // indirect github.com/alibabacloud-go/tea-utils v1.4.5 // indirect github.com/alibabacloud-go/tea-xml v1.1.3 // indirect - github.com/aliyun/credentials-go v1.3.1 // indirect + github.com/aliyun/credentials-go v1.3.2 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2 v1.24.1 // indirect - github.com/aws/aws-sdk-go-v2/config v1.26.6 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.16.16 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect - 
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 // indirect - github.com/aws/aws-sdk-go-v2/service/ecr v1.24.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.5 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect - github.com/aws/smithy-go v1.19.0 // indirect - github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 // indirect + github.com/aws/aws-sdk-go-v2 v1.38.3 // indirect + github.com/aws/aws-sdk-go-v2/config v1.31.6 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.10 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.50.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.2 // indirect + github.com/aws/aws-sdk-go-v2/service/eks v1.73.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 // indirect 
+ github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 // indirect + github.com/aws/smithy-go v1.23.0 // indirect + github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.9.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/bradleyfalzon/ghinstallation/v2 v2.16.0 // indirect github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect - github.com/buildkite/agent/v3 v3.62.0 // indirect - github.com/buildkite/go-pipeline v0.3.2 // indirect - github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/buildkite/agent/v3 v3.98.2 // indirect + github.com/buildkite/go-pipeline v0.13.3 // indirect + github.com/buildkite/interpolate v0.1.5 // indirect + github.com/buildkite/roko v1.3.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect github.com/clbanning/mxj/v2 v2.7.0 // indirect - github.com/cloudflare/circl v1.3.7 // indirect + github.com/cloudflare/circl v1.6.1 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // 
indirect github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect - github.com/containerd/containerd v1.7.12 // indirect - github.com/containerd/continuity v0.4.2 // indirect + github.com/containerd/containerd v1.7.28 // indirect + github.com/containerd/continuity v0.4.5 // indirect + github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/log v0.1.0 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect - github.com/coreos/go-oidc/v3 v3.9.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect + github.com/coreos/go-oidc/v3 v3.15.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/distribution/reference v0.5.0 // indirect + github.com/distribution/reference v0.6.0 // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v24.0.7+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.0 // indirect - github.com/docker/go-connections 
v0.4.0 // indirect + github.com/docker/docker v28.3.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect + github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/evanphx/json-patch v5.7.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.7.0 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/evanphx/json-patch v5.9.11+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect - github.com/fatih/color v1.15.0 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fluxcd/gitkit v0.6.0 // indirect - github.com/fluxcd/pkg/apis/acl v0.1.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fluxcd/pkg/apis/acl v0.9.0 // indirect + github.com/fluxcd/pkg/lockedfile v0.7.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect github.com/go-chi/chi v4.1.2+incompatible // indirect 
github.com/go-errors/errors v1.5.1 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect - github.com/go-jose/go-jose/v3 v3.0.1 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/go-jose/go-jose/v3 v3.0.4 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect + github.com/go-ldap/ldap/v3 v3.4.10 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/analysis v0.22.0 // indirect - github.com/go-openapi/errors v0.21.0 // indirect - github.com/go-openapi/jsonpointer v0.20.2 // indirect - github.com/go-openapi/jsonreference v0.20.4 // indirect - github.com/go-openapi/loads v0.21.5 // indirect - github.com/go-openapi/runtime v0.27.1 // indirect - github.com/go-openapi/spec v0.20.13 // indirect - github.com/go-openapi/strfmt v0.22.0 // indirect - github.com/go-openapi/swag v0.22.9 // indirect - github.com/go-openapi/validate v0.22.4 // indirect - github.com/go-piv/piv-go v1.11.0 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/errors v0.22.1 // indirect + github.com/go-openapi/jsonpointer v0.21.1 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/runtime v0.28.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/strfmt v0.23.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect + 
github.com/go-openapi/validate v0.24.0 // indirect + github.com/go-piv/piv-go/v2 v2.3.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gobwas/glob v0.2.3 // indirect + github.com/goccy/go-json v0.10.5 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang-jwt/jwt/v5 v5.2.0 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang-jwt/jwt/v4 v4.5.2 // indirect + github.com/golang-jwt/jwt/v5 v5.3.0 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/btree v1.1.2 // indirect - github.com/google/certificate-transparency-go v1.1.7 // indirect - github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa // indirect - github.com/google/go-github/v55 v55.0.0 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/certificate-transparency-go v1.3.2 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20250225234217-098045d5e61f // indirect + github.com/google/go-github/v72 v72.0.0 // indirect github.com/google/go-querystring 
v1.1.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/s2a-go v0.1.7 // indirect + github.com/google/s2a-go v0.1.9 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect - github.com/gorilla/handlers v1.5.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect + github.com/googleapis/gax-go/v2 v2.15.0 // indirect + github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gosuri/uitable v0.0.4 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-retryablehttp v0.7.5 // indirect + github.com/hashicorp/go-retryablehttp v0.7.8 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect - github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect - github.com/hashicorp/hcl v1.0.1-vault-5 // indirect - github.com/huandu/xstrings v1.4.0 // indirect - github.com/imdario/mergo v0.3.16 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 
// indirect + github.com/huandu/xstrings v1.5.0 // indirect + github.com/in-toto/attestation v1.1.1 // indirect github.com/in-toto/in-toto-golang v0.9.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/jmoiron/sqlx v1.3.5 // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/compress v1.17.4 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect - github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 // indirect + github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect 
github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect - github.com/miekg/dns v1.1.55 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/miekg/dns v1.1.58 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/minio/crc64nvme v1.0.1 // indirect github.com/minio/md5-simd v1.1.2 // indirect - github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/locker v1.0.1 // indirect - github.com/moby/spdystream v0.2.0 // indirect - github.com/moby/term v0.5.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/spdystream v0.5.0 // indirect + github.com/moby/sys/user v0.3.0 // indirect + github.com/moby/term v0.5.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect - github.com/morikuni/aec v1.0.0 // indirect - github.com/mozillazg/docker-credential-acr-helper v0.3.0 // indirect + 
github.com/mozillazg/docker-credential-acr-helper v0.4.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/notaryproject/notation-plugin-framework-go v1.0.0 // indirect + github.com/notaryproject/tspclient-go v1.0.0 // indirect github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect github.com/oklog/ulid v1.3.1 // indirect - github.com/oleiade/reflections v1.0.1 // indirect - github.com/opencontainers/image-spec v1.1.0-rc5 // indirect - github.com/opencontainers/runc v1.1.5 // indirect + github.com/oleiade/reflections v1.1.0 // indirect + github.com/opencontainers/go-digest/blake3 v0.0.0-20250813155314-89707e38ad1a // indirect + github.com/opencontainers/runc v1.2.4 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/otiai10/mint v1.6.3 // indirect github.com/pborman/uuid v1.2.1 // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect - github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect + github.com/pjbgf/sha1cd v0.4.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + 
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/otlptranslator v0.0.2 // indirect + github.com/prometheus/procfs v0.17.0 // indirect github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect - github.com/redis/go-redis/v9 v9.3.0 // indirect - github.com/rivo/uniseg v0.4.4 // indirect - github.com/rs/xid v1.5.0 // indirect - github.com/rubenv/sql-migrate v1.5.2 // indirect + github.com/redis/go-redis/v9 v9.8.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rs/xid v1.6.0 // indirect + github.com/rubenv/sql-migrate v1.8.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sagikazarmark/locafero v0.7.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect github.com/sassoftware/relic v7.2.1+incompatible // indirect - github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect github.com/segmentio/ksuid v1.0.4 // indirect - github.com/sergi/go-diff v1.3.1 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect - 
github.com/shopspring/decimal v1.3.1 // indirect - github.com/sigstore/fulcio v1.4.3 // indirect - github.com/sigstore/rekor v1.3.4 // indirect - github.com/sigstore/timestamp-authority v1.2.1 // indirect - github.com/skeema/knownhosts v1.2.1 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/sigstore/fulcio v1.7.1 // indirect + github.com/sigstore/protobuf-specs v0.4.3 // indirect + github.com/sigstore/rekor v1.3.10 // indirect + github.com/sigstore/sigstore-go v1.0.0 // indirect + github.com/sigstore/timestamp-authority v1.2.8 // indirect + github.com/skeema/knownhosts v1.3.1 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/cobra v1.8.0 // indirect - github.com/spf13/viper v1.18.2 // indirect - github.com/spiffe/go-spiffe/v2 v2.1.7 // indirect - github.com/stretchr/objx v0.5.1 // indirect + github.com/spf13/afero v1.12.0 // indirect + github.com/spf13/cast v1.7.1 // indirect + github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/viper v1.20.1 // indirect + github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/thales-e-security/pool v0.0.2 // indirect github.com/theupdateframework/go-tuf v0.7.0 // indirect + github.com/theupdateframework/go-tuf/v2 v2.1.1 // indirect + 
github.com/tinylib/msgp v1.3.0 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/tjfoc/gmsm v1.4.1 // indirect github.com/transparency-dev/merkle v0.0.2 // indirect - github.com/vbatts/tar-split v0.11.5 // indirect - github.com/xanzy/go-gitlab v0.96.0 // indirect + github.com/vbatts/tar-split v0.12.1 // indirect + github.com/veraison/go-cose v1.3.0 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect github.com/zeebo/blake3 v0.2.3 // indirect - github.com/zeebo/errs v1.3.0 // indirect - go.mongodb.org/mongo-driver v1.13.1 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/exporters/autoexport v0.46.1 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect - go.opentelemetry.io/otel v1.22.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.44.0 // indirect - 
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.22.0 // indirect - go.opentelemetry.io/otel/sdk v1.22.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.22.0 // indirect - go.opentelemetry.io/proto/otlp v1.0.0 // indirect - go.starlark.net v0.0.0-20231121155337-90ade8b19d09 // indirect - go.step.sm/crypto v0.42.1 // indirect + github.com/zeebo/errs v1.4.0 // indirect + gitlab.com/gitlab-org/api/client-go v0.130.1 // indirect + go.mongodb.org/mongo-driver v1.14.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect + go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.60.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 // indirect + 
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect + go.opentelemetry.io/otel/log v0.14.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.14.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.8.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.26.0 // indirect - golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb // indirect - golang.org/x/mod v0.14.0 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/sys v0.16.0 // indirect - golang.org/x/term v0.16.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.17.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/grpc v1.61.0 // indirect - google.golang.org/protobuf v1.32.0 // indirect - gopkg.in/evanphx/json-patch.v5 v5.7.0 // indirect - gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect + go.uber.org/zap v1.27.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/term v0.34.0 // indirect + golang.org/x/text v0.28.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools v0.36.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect + 
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/grpc v1.75.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.28.6 // indirect - k8s.io/apiserver v0.28.6 // indirect - k8s.io/cli-runtime v0.28.6 // indirect - k8s.io/component-base v0.28.6 // indirect - k8s.io/klog/v2 v2.110.1 // indirect - k8s.io/kube-openapi v0.0.0-20231206194836-bf4651e18aa8 // indirect - k8s.io/kubectl v0.28.6 // indirect - oras.land/oras-go v1.2.4 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.16.0 // indirect - sigs.k8s.io/kustomize/kyaml v0.16.0 // indirect - sigs.k8s.io/release-utils v0.7.7 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + k8s.io/apiextensions-apiserver v0.34.0 // indirect + k8s.io/apiserver v0.34.0 // indirect + k8s.io/cli-runtime v0.34.0 // indirect + k8s.io/component-base v0.34.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/kubectl v0.34.0 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/kustomize/api v0.20.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/release-utils v0.11.1 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) retract v0.32.0 // Refers to incorrect ./api version. 
diff --git a/go.sum b/go.sum index 57fa1c7e1..369cd9509 100644 --- a/go.sum +++ b/go.sum @@ -1,100 +1,138 @@ +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM= -cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/kms v1.15.5 h1:pj1sRfut2eRbD9pFRjNnPNg/CzJPuQAzUujMIM1vVeM= -cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= -cloud.google.com/go/storage v1.35.1 h1:B59ahL//eDfx2IIKFBeT5Atm9wnNmj3+8xG/W4WB//w= -cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= +cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= +cloud.google.com/go/auth v0.16.5 h1:mFWNQ2FEVWAliEQWpAdH80omXFokmrnbDhUS9cBywsI= +cloud.google.com/go/auth v0.16.5/go.mod h1:utzRfHMP+Vv0mpOkTRQoWD2q3BatTOoWbA7gCc2dUhQ= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= 
+cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.8.0 h1:HxMRIbao8w17ZX6wBnjhcDkW6lTFpgcaobyVfZWqRLA= +cloud.google.com/go/compute/metadata v0.8.0/go.mod h1:sYOGTp851OV9bOFJ9CH7elVvyzopvWQFNNghtDQ/Biw= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/kms v1.22.0 h1:dBRIj7+GDeeEvatJeTB19oYZNV0aj6wEqSIT/7gLqtk= +cloud.google.com/go/kms v1.22.0/go.mod h1:U7mf8Sva5jpOb4bxYZdtw/9zsbIjrklYwPcvMk34AL8= +cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= +cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= +cloud.google.com/go/storage v1.56.1 h1:n6gy+yLnHn0hTwBFzNn8zJ1kqWfR91wzdM8hjRF4wP0= +cloud.google.com/go/storage v1.56.1/go.mod h1:C9xuCZgFl3buo2HZU/1FncgvvOgTAs/rnh4gF4lMg0s= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= +cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= +cuelabs.dev/go/oci/ociregistry v0.0.0-20241125120445-2c00c104c6e1 h1:mRwydyTyhtRX2wXS3mqYWzR2qlv6KsmoKXmlz5vInjg= +cuelabs.dev/go/oci/ociregistry v0.0.0-20241125120445-2c00c104c6e1/go.mod h1:5A4xfTzHTXfeVJBU6RAUf+QrlfTCW+017q/QiW+sMLg= +cuelang.org/go v0.12.1 h1:5I+zxmXim9MmiN2tqRapIqowQxABv2NKTgbOspud1Eo= +cuelang.org/go v0.12.1/go.mod h1:B4+kjvGGQnbkz+GuAv1dq/R308gTkp0sO28FdMrJ2Kw= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo 
v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= -github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230618160516-e936619f9f18 h1:rd389Q26LMy03gG4anandGFC2LW/xvjga5GezeeaxQk= -github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230618160516-e936619f9f18/go.mod h1:fgJuSBrJP5qZtKqaMJE0hmhS2tmRH+44IkfZvjtaf1M= -github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 h1:8+4G8JaejP8Xa6W46PzJEwisNgBXMvFcz78N6zG/ARw= -github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0/go.mod h1:GgeIE+1be8Ivm7Sh4RgwI42aTtC9qrcj+Y9Y6CjJhJs= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 h1:kcnfY4vljxXliXDBrA9K9lwF8IoEZ4Up6Eg9kWTIm28= +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0/go.mod h1:tlqp9mUGbsP+0z3Q+c0Q5MgSdq/OMwQhm5bffR3Q3ss= 
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 h1:MyVTgWR8qd/Jw1Le0NZebGBUCLbtak3bJ3z1OlqZBpw= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob 
v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0 h1:ci6Yd6nysBRLEodoziB6ah1+YOzZbZk+NYneoA6q+6E= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0/go.mod h1:QyVsSSN64v5TGltphKLQ2sQxe4OBQg0J1eKRcVBnfgE= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 h1:MhRfI58HblXzCtWEZCO0feHs8LweePB3s90r7WaR1KU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0/go.mod h1:okZ+ZURbArNdlJ+ptXoyHNuOETzOl1Oww19rm8I2WLA= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= +github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry v0.2.3 h1:ldKsKtEIblsgsr6mPwrd9yRntoX6uLz/K89wsldwx/k= +github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry v0.2.3/go.mod h1:MAm7bk0oDLmD8yIkvfbxPW04fxzphPyL+7GzwHxOp6Y= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice v1.0.0 h1:figxyQZXzZQIcP3njhC68bYUiTw45J8/SsHaLW8Ax0M= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice v1.0.0/go.mod h1:TmlMW4W5OvXOmOyKNnor8nlMMiO1ctIyzmHme/VHsrA= 
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1 h1:Wgf5rZba3YZqeTNJPtvqZoBu1sBN/L4sry+u2U3Y75w= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1/go.mod h1:xxCBG/f/4Vbmh2XQJBsOmNdxWUY5j/s27jujKPbQf14= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1 h1:bFWuoEKg+gImo7pvkiQEFAc8ocibADgXeiLAxWhWmkI= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1/go.mod h1:Vih/3yc6yac2JzU4hzpaDupBJP0Flaia9rXXrU8xyww= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 h1:FwladfywkNirM+FZYLBR2kBz5C8Tg0fw5w5Y7meRXWI= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2/go.mod h1:vv5Ad0RrIoT1lJFdWBZwt4mB1+j+V8DUroixmKDTCdk= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= -github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= -github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= +github.com/Azure/go-autorest/autorest 
v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= +github.com/Azure/go-autorest/autorest v0.11.30 h1:iaZ1RGz/ALZtN5eq4Nr1SOFSlf2E4pDI3Tcsl+dZPVE= +github.com/Azure/go-autorest/autorest v0.11.30/go.mod h1:t1kpPIOpIVX7annvothKvb0stsrXa37i7b+xpmBW8Fs= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= -github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= -github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= +github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= 
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.7 h1:Q9R3utmFg9K1B4OYtAZ7ZUUvIUdzQt7G2MN5Hi/d670= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.7/go.mod h1:bVrAueELJ0CKLBpUHDIvD516TwmHmzqwCpvONWRsw3s= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/date v0.3.1 h1:o9Z8Jyt+VJJTCZ/UORishuHOusBwolhjokt9s5k8I4w= +github.com/Azure/go-autorest/autorest/date v0.3.1/go.mod h1:Dz/RDmXlfiFFS/eW+b/xMUSFs1tboPVy6UjgADToWDM= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/logger v0.2.2 h1:hYqBsEBywrrOSW24kkOCXRcKfKhK76OzLTfF+MYDE2o= +github.com/Azure/go-autorest/logger v0.2.2/go.mod h1:I5fg9K52o+iuydlWfa9T5K6WFos9XYr9dYTFzpqgibw= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/Azure/go-autorest/tracing v0.6.1 h1:YUMSrC/CeD1ZnnXcNYU4a/fzsO35u2Fsful9L/2nyR0= 
+github.com/Azure/go-autorest/tracing v0.6.1/go.mod h1:/3EgjbsjraOqiicERAeu3m7/z0x1TzjQGAwDrJrXGkc= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= +github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= -github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= 
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod 
h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= -github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= -github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= -github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw= 
+github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= +github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM= +github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.2/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo= github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= @@ -135,56 +173,62 @@ github.com/alibabacloud-go/tea-xml v1.1.2/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCE github.com/alibabacloud-go/tea-xml v1.1.3 h1:7LYnm+JbOq2B+T/B0fHC4Ies4/FofC4zHzYtqw7dgt0= github.com/alibabacloud-go/tea-xml v1.1.3/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= 
github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw= -github.com/aliyun/credentials-go v1.3.1 h1:uq/0v7kWrxmoLGpqjx7vtQ/s03f0zR//0br/xWDTE28= -github.com/aliyun/credentials-go v1.3.1/go.mod h1:8jKYhQuDawt8x2+fusqa1Y6mPxemTsBEN04dgcAcYz0= +github.com/aliyun/credentials-go v1.3.2 h1:L4WppI9rctC8PdlMgyTkF8bBsy9pyKQEzBD1bHMRl+g= +github.com/aliyun/credentials-go v1.3.2/go.mod h1:tlpz4uys4Rn7Ik4/piGRrTbXy2uLKvePgQJJduE+Y5c= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.50.0 h1:HBtrLeO+QyDKnc3t1+5DR1RxodOHCGr8ZcrHudpv7jI= -github.com/aws/aws-sdk-go v1.50.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= -github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= -github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o= -github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4= 
-github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= -github.com/aws/aws-sdk-go-v2/service/ecr v1.24.7 h1:3iaT/LnGV6jNtbBkvHZDlzz7Ky3wMHDJAyFtGd5GUJI= -github.com/aws/aws-sdk-go-v2/service/ecr v1.24.7/go.mod h1:mtzCLxk6M+KZbkJdq3cUH9GCrudw8qCy5C3EHO+5vLc= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.5 h1:PQp21GBlGNaQ+AVJAB8w2KTmLx0DkFS2fDET2Iy3+f0= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.5/go.mod h1:WMntdAol8KgeYsa5sDZPsRTXs4jVZIMYu0eQVVIQxnc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= 
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= -github.com/aws/aws-sdk-go-v2/service/kms v1.27.9 h1:W9PbZAZAEcelhhjb7KuwUtf+Lbc+i7ByYJRuWLlnxyQ= -github.com/aws/aws-sdk-go-v2/service/kms v1.27.9/go.mod h1:2tFmR7fQnOdQlM2ZCEPpFnBIQD1U8wmXmduBgZbOag0= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= -github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= -github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 h1:SoFYaT9UyGkR0+nogNyD/Lj+bsixB+SNuAS4ABlEs6M= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8/go.mod h1:2JF49jcDOrLStIXN/j/K1EKRq8a8R2qRnlZA6/o/c7c= +github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE= +github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.38.3 
h1:B6cV4oxnMs45fql4yRH+/Po/YU+597zgWqvDpYMturk= +github.com/aws/aws-sdk-go-v2 v1.38.3/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/config v1.31.6 h1:a1t8fXY4GT4xjyJExz4knbuoxSCacB5hT/WgtfPyLjo= +github.com/aws/aws-sdk-go-v2/config v1.31.6/go.mod h1:5ByscNi7R+ztvOGzeUaIu49vkMk2soq5NaH5PYe33MQ= +github.com/aws/aws-sdk-go-v2/credentials v1.18.10 h1:xdJnXCouCx8Y0NncgoptztUocIYLKeQxrCgN6x9sdhg= +github.com/aws/aws-sdk-go-v2/credentials v1.18.10/go.mod h1:7tQk08ntj914F/5i9jC4+2HQTAuJirq7m1vZVIhEkWs= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 h1:wbjnrrMnKew78/juW7I2BtKQwa1qlf6EjQgS69uYY14= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6/go.mod h1:AtiqqNrDioJXuUgz3+3T0mBWN7Hro2n9wll2zRUc0ww= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 h1:uF68eJA6+S9iVr9WgX1NaRGyQ/6MdIyc4JNUo6TN1FA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6/go.mod h1:qlPeVZCGPiobx8wb1ft0GHT5l+dc6ldnwInDFaMvC7Y= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 h1:pa1DEC6JoI0zduhZePp3zmhWvk/xxm4NB8Hy/Tlsgos= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6/go.mod h1:gxEjPebnhWGJoaDdtDkA0JX46VRg1wcTHYe63OfX5pE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= +github.com/aws/aws-sdk-go-v2/service/ecr v1.50.1 h1:lcwFjRx3C/hBxJzoWkD6DIG2jeB+mzLmFVBFVOadxxE= +github.com/aws/aws-sdk-go-v2/service/ecr v1.50.1/go.mod h1:qt9OL5kXqWoSub4QAkOF74mS3M2zOTNxMODqgwEUjt8= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.2 h1:EfatDVSMFxaS5TiR0C0zssQU1Nm+rGx3VbUGIH1y274= +github.com/aws/aws-sdk-go-v2/service/ecrpublic 
v1.37.2/go.mod h1:oRy1IEgzXtOkEk4B/J7HZbXUC258drDLtkmc++lN7IA= +github.com/aws/aws-sdk-go-v2/service/eks v1.73.1 h1:Txq5jxY/ao+2Vx/kX9+65WTqkzCnxSlXnwIj+Cr/fng= +github.com/aws/aws-sdk-go-v2/service/eks v1.73.1/go.mod h1:+hYFg3laewH0YCfJRv+o5R3bradDKmFIm/uaiaD1U7U= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 h1:LHS1YAIJXJ4K9zS+1d/xa9JAA9sL2QyXIQCQFQW/X08= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6/go.mod h1:c9PCiTEuh0wQID5/KqA32J+HAgZxN9tOGXKCiYJjTZI= +github.com/aws/aws-sdk-go-v2/service/kms v1.41.0 h1:2jKyib9msVrAVn+lngwlSplG13RpUZmzVte2yDao5nc= +github.com/aws/aws-sdk-go-v2/service/kms v1.41.0/go.mod h1:RyhzxkWGcfixlkieewzpO3D4P4fTMxhIDqDZWsh0u/4= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 h1:8OLZnVJPvjnrxEwHFg9hVUof/P4sibH+Ea4KKuqAGSg= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.1/go.mod h1:27M3BpVi0C02UiQh1w9nsBEit6pLhlaH3NHna6WUbDE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 h1:gKWSTnqudpo8dAxqBqZnDoDWCiEh/40FziUjr/mo6uA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2/go.mod h1:x7+rkNmRoEN1U13A6JE2fXne9EWyJy54o3n6d4mGaXQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 h1:YZPjhyaGzhDQEvsffDEcpycq49nl7fiGcfJTIo8BszI= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.2/go.mod h1:2dIN8qhQfv37BdUYGgEC8Q3tteM3zFxTI1MLO2O3J3c= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= 
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.9.1 h1:50sS0RWhGpW/yZx2KcDNEb1u1MANv5BMEkJgcieEDTA= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.9.1/go.mod h1:ErZOtbzuHabipRTDTor0inoRlYwbsV1ovwSxjGs/uJo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bradleyfalzon/ghinstallation/v2 v2.16.0 h1:B91r9bHtXp/+XRgS5aZm6ZzTdz3ahgJYmkt4xZkgDz8= +github.com/bradleyfalzon/ghinstallation/v2 v2.16.0/go.mod h1:OeVe5ggFzoBnmgitZe/A+BqGOnv1DvU/0uiLQi1wutM= github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w= @@ -193,70 +237,69 @@ github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdb github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= 
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= -github.com/buildkite/agent/v3 v3.62.0 h1:yvzSjI8Lgifw883I8m9u8/L/Thxt4cLFd5aWPn3gg70= -github.com/buildkite/agent/v3 v3.62.0/go.mod h1:jN6SokGXrVNNIpI0BGQ+j5aWeI3gin8F+3zwA5Q6gqM= -github.com/buildkite/go-pipeline v0.3.2 h1:SW4EaXNwfjow7xDRPGgX0Rcx+dPj5C1kV9LKCLjWGtM= -github.com/buildkite/go-pipeline v0.3.2/go.mod h1:iY5jzs3Afc8yHg6KDUcu3EJVkfaUkd9x/v/OH98qyUA= -github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251 h1:k6UDF1uPYOs0iy1HPeotNa155qXRWrzKnqAaGXHLZCE= -github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251/go.mod h1:gbPR1gPu9dB96mucYIR7T3B7p/78hRVSOuzIWLHK2Y4= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= -github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/buildkite/agent/v3 v3.98.2 h1:VOOxv8XD8HVCtEvtRPQhvB6k2Gorha2gN1wGh94gYAA= +github.com/buildkite/agent/v3 v3.98.2/go.mod h1:+zCvvo/OlOwfs+AH3QvSn37H3cBXP3Fe18eoSbqUvnY= +github.com/buildkite/go-pipeline v0.13.3 h1:llI7sAdZ7sqYE7r8ePlmDADRhJ1K0Kua2+gv74Z9+Es= +github.com/buildkite/go-pipeline v0.13.3/go.mod h1:1uC2XdHkTV1G5jYv9K8omERIwrsYbBruBrPx1Zu1uFw= +github.com/buildkite/interpolate v0.1.5 h1:v2Ji3voik69UZlbfoqzx+qfcsOKLA61nHdU79VV+tPU= +github.com/buildkite/interpolate v0.1.5/go.mod h1:dHnrwHew5O8VNOAgMDpwRlFnhL5VSN6M1bHVmRZ9Ccc= +github.com/buildkite/roko 
v1.3.1 h1:t7K30ceLLYn6k7hQP4oq1c7dVlhgD5nRcuSRDEEnY1s= +github.com/buildkite/roko v1.3.1/go.mod h1:23R9e6nHxgedznkwwfmqZ6+0VJZJZ2Sg/uVcp2cP46I= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= -github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4= github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= 
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/clbanning/mxj/v2 v2.5.5/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/clbanning/mxj/v2 v2.7.0 h1:WA/La7UGCanFe5NpHF0Q3DNtnCsVoxbPKuyBNHWRyME= github.com/clbanning/mxj/v2 v2.7.0/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= -github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= -github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= +github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg= +github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= 
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ= github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w= -github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= -github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0= -github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk= -github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= -github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/containerd v1.7.28 h1:Nsgm1AtcmEh4AHAJ4gGlNSaKgXiNccU270Dnf81FQ3c= +github.com/containerd/containerd v1.7.28/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= +github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= +github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/log v0.1.0 
h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= -github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= -github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo= -github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= +github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/coreos/go-oidc/v3 v3.15.0 h1:R6Oz8Z4bqWR7VFQ+sPSvZPQv4x8M+sJkDO5ojgwlyAg= +github.com/coreos/go-oidc/v3 v3.15.0/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= -github.com/creack/pty v1.1.21/go.mod 
h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc= -github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE= -github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.19 h1:tUN6H7LWqNx4hQVxomd0CVsDwaDr9gaRQaI4GpSmrsA= +github.com/creack/pty v1.1.19/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= +github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0= +github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -272,174 +315,191 @@ github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1G github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/distribution/distribution/v3 v3.0.0-alpha.1 h1:jn7I1gvjOvmLztH1+1cLiUFud7aeJCIQcgzugtwjyJo= -github.com/distribution/distribution/v3 v3.0.0-alpha.1/go.mod h1:LCp4JZp1ZalYg0W/TN05jarCQu+h4w7xc7ZfQF4Y/cY= -github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg= -github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= +github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference 
v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY= +github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= -github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= -github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= +github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= 
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/elazarl/goproxy v0.0.0-20231117061959-7cc037d33fb5 h1:m62nsMU279qRD9PQSWD1l66kmkXzuYcnVJqL4XLeV2M= -github.com/elazarl/goproxy v0.0.0-20231117061959-7cc037d33fb5/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= +github.com/elazarl/goproxy v1.7.2/go.mod 
h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/proto v1.13.4 h1:myn1fyf8t7tAqIzV91Tj9qXpvyXXGXk8OS2H6IBSc9g= +github.com/emicklei/proto v1.13.4/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= +github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= 
-github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= -github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc= -github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= +github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod 
h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fluxcd/cli-utils v0.36.0-flux.3 h1:5CQTOc08UnabfwluIYxIhlhpCCTplWBn/xpjVr560J0= -github.com/fluxcd/cli-utils v0.36.0-flux.3/go.mod h1:9lShvUz7uRPIjYZ6phr5AOuORkRDmaUgf/sZN7SDcpo= +github.com/fluxcd/cli-utils v0.36.0-flux.15 h1:Et5QLnIpRjj+oZtM9gEybkAaoNsjysHq0y1253Ai94Y= +github.com/fluxcd/cli-utils v0.36.0-flux.15/go.mod h1:AqRUmWIfNE7cdL6NWSGF0bAlypGs+9x5UQ2qOtlEzv4= github.com/fluxcd/gitkit v0.6.0 h1:iNg5LTx6ePo+Pl0ZwqHTAkhbUHxGVSY3YCxCdw7VIFg= github.com/fluxcd/gitkit v0.6.0/go.mod h1:svOHuKi0fO9HoawdK4HfHAJJseZDHHjk7I3ihnCIqNo= -github.com/fluxcd/pkg/apis/acl v0.1.0 h1:EoAl377hDQYL3WqanWCdifauXqXbMyFuK82NnX6pH4Q= -github.com/fluxcd/pkg/apis/acl v0.1.0/go.mod h1:zfEZzz169Oap034EsDhmCAGgnWlcWmIObZjYMusoXS8= -github.com/fluxcd/pkg/apis/event v0.7.0 h1:QN/gz9i5kZ3GlfTOE6SCjjnSXrSPUU75MCVRwN8U+qo= -github.com/fluxcd/pkg/apis/event v0.7.0/go.mod h1:zdqe8SVXjFQ/Nfuk51c2SJe0NkyNwYOxSFtN6SmikVs= -github.com/fluxcd/pkg/apis/meta v1.3.0 h1:KxeEc6olmSZvQ5pBONPE4IKxyoWQbqTJF1X6K5nIXpU= -github.com/fluxcd/pkg/apis/meta v1.3.0/go.mod h1:3Ui8xFkoU4sYehqmscjpq7NjqH2YN1A2iX2okbO3/yA= -github.com/fluxcd/pkg/git v0.17.0 h1:eHL8IazeX2HXwXzT6zLdzGaX3H37n/ipkdd1+byyzUM= -github.com/fluxcd/pkg/git v0.17.0/go.mod h1:lBeHCTtVt9py9mMGj5sKs4+aFpMWGjH73gx5i818i6o= -github.com/fluxcd/pkg/git/gogit v0.17.0 h1:X8C+q/Nm/MjBKAoqw9NPpnJh0B3IxVLtqPgH+wT9NEg= -github.com/fluxcd/pkg/git/gogit v0.17.0/go.mod h1:qyRSCQy41wG0FwUwKQtfSnwqkrJg5XB4UdMvrHjIcFY= -github.com/fluxcd/pkg/gittestserver v0.10.0 
h1:joqfczQNtguZFGxTuRL535ymDW/9clA1jBWa3d8B6WU= -github.com/fluxcd/pkg/gittestserver v0.10.0/go.mod h1:Prva0GFjFWmzEsVutACXl0dsAfIEfMfCepEOzzjP3Bw= -github.com/fluxcd/pkg/helmtestserver v0.16.0 h1:fQDfyFef9U+/jCF3FYFppmdd/RS/MmH6DxILdyFl/pg= -github.com/fluxcd/pkg/helmtestserver v0.16.0/go.mod h1:P8dwoySpSjCxngARo4vIVMP79i5LMm+hi6EMAQuyC84= -github.com/fluxcd/pkg/lockedfile v0.1.0 h1:YsYFAkd6wawMCcD74ikadAKXA4s2sukdxrn7w8RB5eo= -github.com/fluxcd/pkg/lockedfile v0.1.0/go.mod h1:EJLan8t9MiOcgTs8+puDjbE6I/KAfHbdvIy9VUgIjm8= -github.com/fluxcd/pkg/masktoken v0.2.0 h1:HoSPTk4l1fz5Fevs2vVRvZGru33blfMwWSZKsHdfG/0= -github.com/fluxcd/pkg/masktoken v0.2.0/go.mod h1:EA7GleAHL33kN6kTW06m5R3/Q26IyuGO7Ef/0CtpDI0= -github.com/fluxcd/pkg/oci v0.35.0 h1:VPFeEsF9U2O5Vg/l/cD0d6+MHzQUJGpT3OBSGEXpN8U= -github.com/fluxcd/pkg/oci v0.35.0/go.mod h1:B5Q+Rb4zfQ9GR24FjUsCNmQMWXNRfsC0ovHxFXrpUCo= -github.com/fluxcd/pkg/runtime v0.44.0 h1:0BEPSpcsYXOiswKG5TWkin8fhCDHb0nDdAtq/5VrCSI= -github.com/fluxcd/pkg/runtime v0.44.0/go.mod h1:s1AhSOTCEBPaTfz/GdBD/Ws66uOByIuNP4Znrq+is9M= -github.com/fluxcd/pkg/sourceignore v0.5.0 h1:8ffSJCRIKsMpxXjGPVeRK3xhGUjuk+tFILf/+EODCVg= -github.com/fluxcd/pkg/sourceignore v0.5.0/go.mod h1:cJsXn+wYmRY3VamrtG9I3MBL2wjtns2bS7ARIht2XAQ= -github.com/fluxcd/pkg/ssh v0.11.0 h1:7WDDrcB0cNimzZjrpkzYBrizkrUgyM4Zr2cd9z9aqpU= -github.com/fluxcd/pkg/ssh v0.11.0/go.mod h1:K8YgH8KM0GV5DWuRErX3iKgpoHlYh08SBK+U5Q0teJc= -github.com/fluxcd/pkg/tar v0.4.0 h1:SuXpfXBIcSJ5R/yqQi2CBxBmV/i/LH0agqNAh2PWBZg= -github.com/fluxcd/pkg/tar v0.4.0/go.mod h1:SyJBaQvuv2VA/rv4d1OHhCV6R8+9QKc9np193EzNHBc= -github.com/fluxcd/pkg/testserver v0.5.0 h1:n/Iskk0tXNt2AgIgjz9qeFK/VhEXGfqeazABXZmO2Es= -github.com/fluxcd/pkg/testserver 
v0.5.0/go.mod h1:/p4st6d0uPLy8wXydeF/kDJgxUYO9u2NqySuXb9S+Fo= -github.com/fluxcd/pkg/version v0.2.2 h1:ZpVXECeLA5hIQMft11iLp6gN3cKcz6UNuVTQPw/bRdI= -github.com/fluxcd/pkg/version v0.2.2/go.mod h1:NGnh/no8S6PyfCDxRFrPY3T5BUnqP48MxfxNRU0z8C0= -github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= -github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fluxcd/pkg/apis/acl v0.9.0 h1:wBpgsKT+jcyZEcM//OmZr9RiF8klL3ebrDp2u2ThsnA= +github.com/fluxcd/pkg/apis/acl v0.9.0/go.mod h1:TttNS+gocsGLwnvmgVi3/Yscwqrjc17+vhgYfqkfrV4= +github.com/fluxcd/pkg/apis/event v0.19.0 h1:ZJU2voontkzp5rNYA4JMOu40S4tRcrWi4Do59EnyFwg= +github.com/fluxcd/pkg/apis/event v0.19.0/go.mod h1:deuIyUb6lh+Z1Ccvwwxhm1wNM3kpSo+vF1IgRnpaZfQ= +github.com/fluxcd/pkg/apis/meta v1.21.0 h1:R+bN02chcs0HUmyVDQhqe/FHmYLjipVDMLnyYfNX850= +github.com/fluxcd/pkg/apis/meta v1.21.0/go.mod h1:XUAEUgT4gkWDAEN79E141tmL+v4SV50tVZ/Ojpc/ueg= +github.com/fluxcd/pkg/artifact v0.3.0 h1:Mxescx4HOaXJDYhdgecmZwGdnrgPFu/N6sJY9GuTpuo= +github.com/fluxcd/pkg/artifact v0.3.0/go.mod h1:CFtfSBcma+WBkIhjxleaXoCwIjccdkunLO7gv/59xe8= +github.com/fluxcd/pkg/auth v0.31.0 h1:PIwSn7Onq74cGDTocZJZ6P47FxGvbT8NIW7UKFm51rU= +github.com/fluxcd/pkg/auth v0.31.0/go.mod h1:Qxc5OKRMLBwtxO0nf2stm4ZkgzXcrvF6x6BSquiAMW8= +github.com/fluxcd/pkg/cache v0.11.0 h1:fsE8S+una21fSNw4MDXGUIf0Gf1J+pqa4RbsVKf2aTI= +github.com/fluxcd/pkg/cache v0.11.0/go.mod h1:2RTIU6PsJniHmfnllQWFEo7fa5V8KQlnMgn4o0sme40= +github.com/fluxcd/pkg/git v0.36.0 h1:oakFKxTX5yiLcFzCS1SaV+mMXaODaF1Ic6/oCLfIe7I= +github.com/fluxcd/pkg/git v0.36.0/go.mod 
h1:4TgfjcoM3B2sGsO5VbfBSwJQYzNCONGihcTOW8P3Jxw= +github.com/fluxcd/pkg/git/gogit v0.40.0 h1:VCsHC1440jMk1wAGWCwkgU2nDUBOPeYbCk6/OtvbY7Y= +github.com/fluxcd/pkg/git/gogit v0.40.0/go.mod h1:nQVyfa+rYSeVQiwVH5f/C4o1sf2MtMFjMlt3VSkC+P0= +github.com/fluxcd/pkg/gittestserver v0.20.0 h1:xhzLV89mta23ZvTK0cpDCR6ni6vp5Di+9b4v3YBziMQ= +github.com/fluxcd/pkg/gittestserver v0.20.0/go.mod h1:vGmM9eDJk56gx+osTcSHeScefnAaL4czR+rsNsvh0nw= +github.com/fluxcd/pkg/helmtestserver v0.30.0 h1:gEJ6kHei8/SB8J/YemeWaypCxRtfmoejqMxtEOlZRgI= +github.com/fluxcd/pkg/helmtestserver v0.30.0/go.mod h1:xXOkfz7/4z8fz9GJYrYVB9we7bvtmdKKedBeGPHVlhs= +github.com/fluxcd/pkg/http/transport v0.7.0 h1:LbA0qzh1lT6GncWLkN/BjbSMrN8bdFtaa2TqxiIdyzs= +github.com/fluxcd/pkg/http/transport v0.7.0/go.mod h1:G3ptGZKlY0PJZsvWCwzV9vKQ90yfP/mKT2/ZdAud9LE= +github.com/fluxcd/pkg/lockedfile v0.7.0 h1:tmzW2GeMGuJMiCcVloXVd1vKZ92anm9WGkRgOBpWfRk= +github.com/fluxcd/pkg/lockedfile v0.7.0/go.mod h1:AzCV/h1N3hi/KtUDUCUgS8hl1+a1y+I6pmRo25dxdK0= +github.com/fluxcd/pkg/masktoken v0.8.0 h1:Dm5xIVNbg0s6zNttjDvimaG38bKsXwxBVo5b+D7ThVU= +github.com/fluxcd/pkg/masktoken v0.8.0/go.mod h1:Gc73ALOqIe+5Gj2V3JggMNiYcBiZ9bNNDYBE9R5XTTg= +github.com/fluxcd/pkg/oci v0.56.0 h1:t/jnHpizC+j7Gugw8y14HdfHnhLIgmxR3yNdArghUrM= +github.com/fluxcd/pkg/oci v0.56.0/go.mod h1:WZxMYYWfugc4rtnq2zHUIHxH0+e6IRhP9EDq+mW/Z2w= +github.com/fluxcd/pkg/runtime v0.84.0 h1:3M+egIQwQU9YYjKQkczyawG+9RUOkGtkDMQlePnEeTM= +github.com/fluxcd/pkg/runtime v0.84.0/go.mod h1:Wt9mUzQgMPQMu2D/wKl5pG4zh5vu/tfF5wq9pPobxOQ= +github.com/fluxcd/pkg/sourceignore v0.14.0 h1:ZiZzbXtXb/Qp7I7JCStsxOlX8ri8rWwCvmvIrJ0UzQQ= +github.com/fluxcd/pkg/sourceignore v0.14.0/go.mod h1:E3zKvyTyB+oQKqm/2I/jS6Rrt3B7fNuig/4bY2vi3bg= 
+github.com/fluxcd/pkg/ssh v0.21.0 h1:ZmyF0n9je0cTTkOpvFVgIhmdx9qtswnVE60TK4IzJh0= +github.com/fluxcd/pkg/ssh v0.21.0/go.mod h1:nX+gvJOmjf0E7lxq5mKKzDIdPEL2jOUQZbkBMS+mDtk= +github.com/fluxcd/pkg/tar v0.14.0 h1:9Gku8FIvPt2bixKldZnzXJ/t+7SloxePlzyVGOK8GVQ= +github.com/fluxcd/pkg/tar v0.14.0/go.mod h1:+rOWYk93qLEJ8WwmkvJOkB8i0dna1mrwJFybE8i9Udo= +github.com/fluxcd/pkg/testserver v0.13.0 h1:xEpBcEYtD7bwvZ+i0ZmChxKkDo/wfQEV3xmnzVybSSg= +github.com/fluxcd/pkg/testserver v0.13.0/go.mod h1:akRYv3FLQUsme15na9ihECRG6hBuqni4XEY9W8kzs8E= +github.com/fluxcd/pkg/version v0.10.0 h1:WETlCRbfbocsDItkCCeh/4x4zQkZ5i/lUe7P7VaQBrI= +github.com/fluxcd/pkg/version v0.10.0/go.mod h1:dgmjEq4ykvBnqK1oVXM+hcXx3kAY/b4uZDYUn8XnHjk= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gliderlabs/ssh v0.3.6 h1:ZzjlDa05TcFRICb3anf/dSPN3ewz1Zx6CMLPWgkm3b8= -github.com/gliderlabs/ssh v0.3.6/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= 
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= +github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= +github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk= +github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 
h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM= +github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= -github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= -github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY= +github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= +github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap/v3 v3.4.10 h1:ot/iwPOhfpNVgB1o+AVXljizWZ9JTp7YF5oeyONmcJU= +github.com/go-ldap/ldap/v3 v3.4.10/go.mod h1:JXh4Uxgi40P6E9rdsYqpUtbW46D9UTjJ9QSwGRznplY= github.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/analysis v0.22.0 h1:wQ/d07nf78HNj4u+KiSY0sT234IAyePPbMgpUjUJQR0= -github.com/go-openapi/analysis v0.22.0/go.mod h1:acDnkkCI2QxIo8sSIPgmp1wUlRohV7vfGtAIVae73b0= -github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY= -github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho= -github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= -github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= -github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= -github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= 
-github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0= -github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8= -github.com/go-openapi/runtime v0.27.1 h1:ae53yaOoh+fx/X5Eaq8cRmavHgDma65XPZuvBqvJYto= -github.com/go-openapi/runtime v0.27.1/go.mod h1:fijeJEiEclyS8BRurYE1DE5TLb9/KZl6eAdbzjsrlLU= -github.com/go-openapi/spec v0.20.13 h1:XJDIN+dLH6vqXgafnl5SUIMnzaChQ6QTo0/UPMbkIaE= -github.com/go-openapi/spec v0.20.13/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= -github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI= -github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4= -github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= -github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= -github.com/go-openapi/validate v0.22.4 h1:5v3jmMyIPKTR8Lv9syBAIRxG6lY0RqeBPB1LKEijzk8= -github.com/go-openapi/validate v0.22.4/go.mod h1:qm6O8ZIcPVdSY5219468Jv7kBdGvkiZLPOmqnqTUZ2A= -github.com/go-piv/piv-go v1.11.0 h1:5vAaCdRTFSIW4PeqMbnsDlUZ7odMYWnHBDGdmtU/Zhg= -github.com/go-piv/piv-go v1.11.0/go.mod h1:NZ2zmjVkfFaL/CF8cVQ/pXdXtuj110zEKGdJM6fJZZM= -github.com/go-rod/rod v0.114.5 h1:1x6oqnslwFVuXJbJifgxspJUd3O4ntaGhRLHt+4Er9c= -github.com/go-rod/rod v0.114.5/go.mod h1:aiedSEFg5DwG/fnNbUOTPMTTWX3MRj6vIs/a684Mthw= -github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= -github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-openapi/analysis 
v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= +github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod 
h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= +github.com/go-piv/piv-go/v2 v2.3.0 h1:kKkrYlgLQTMPA6BiSL25A7/x4CEh2YCG7rtb/aTkx+g= +github.com/go-piv/piv-go/v2 v2.3.0/go.mod h1:ShZi74nnrWNQEdWzRUd/3cSig3uNOcEZp+EWl0oewnI= +github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= +github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU= +github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= -github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= -github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= -github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= -github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= 
-github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= -github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= +github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= @@ -449,16 +509,14 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 
v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= -github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock 
v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -469,138 +527,152 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v1.1.2 
h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/certificate-transparency-go v1.1.7 h1:IASD+NtgSTJLPdzkthwvAG1ZVbF2WtFg4IvoA68XGSw= -github.com/google/certificate-transparency-go v1.1.7/go.mod h1:FSSBo8fyMVgqptbfF6j5p/XNdgQftAhSmXcIxV9iphE= -github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= -github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= +github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.18.0 h1:ShE7erKNPqRh5ue6Z9DUOlk04WsnFWPO6YGr3OxnfoQ= -github.com/google/go-containerregistry v0.18.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= -github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20231202142526-55ffb0092afd h1:mZmB2vabEQDTlhnIQ0t5vEtWkZM8pEY8koVslApiS7k= -github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20231202142526-55ffb0092afd/go.mod h1:Ek+8PQrShkA7aHEj3/zSW33wU0V/Bx3zW/gFh7l21xY= -github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa h1:+MG+Q2Q7mtW6kCIbUPZ9ZMrj7xOWDKI1hhy1qp0ygI0= -github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa/go.mod h1:KdL98/Va8Dy1irB6lTxIRIQ7bQj4lbrlvqUzKEQ+ZBU= -github.com/google/go-github/v55 v55.0.0 h1:4pp/1tNMB9X/LuAhs5i0KQAE40NmiR/y6prLNb9x9cg= -github.com/google/go-github/v55 v55.0.0/go.mod h1:JLahOTA1DnXzhxEymmFF5PP2tSS9JVNj68mSZNDwskA= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU= +github.com/google/go-containerregistry 
v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y= +github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20250613215107-59a4b8593039 h1:1d9SJvpHXjFuYBHAS5576memil93kLpgBZ5OjdtvW4I= +github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20250613215107-59a4b8593039/go.mod h1:AlUTqI/YtH9ckkhLo4ClTAccEOZz8EaLVxqrfv56OFg= +github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20250225234217-098045d5e61f h1:GJRzEBoJv/A/E7JbTekq1Q0jFtAfY7TIxUFAK89Mmic= +github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20250225234217-098045d5e61f/go.mod h1:ZT74/OE6eosKneM9/LQItNxIMBV6CI5S46EXAnvkTBI= +github.com/google/go-github/v72 v72.0.0 h1:FcIO37BLoVPBO9igQQ6tStsv2asG4IPcYFi655PPvBM= +github.com/google/go-github/v72 v72.0.0/go.mod h1:WWtw8GMRiL62mvIquf1kO3onRHeWWKmK01qdCY8c5fg= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= -github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
-github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= -github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 h1:xhMrHhTJ6zxu3gA4enFM9MLn9AY7613teCdFnlUVbSQ= +github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w= -github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM= -github.com/google/trillian v1.5.3 h1:3ioA5p09qz+U9/t2riklZtaQdZclaStp0/eQNfewNRg= -github.com/google/trillian v1.5.3/go.mod h1:p4tcg7eBr7aT6DxrAoILpc3uXNfcuAvZSnQKonVg+Eo= +github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js= +github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod 
h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 
h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M= -github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= +github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod 
h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-sockaddr v1.0.5 h1:dvk7TIXCZpmfOlM+9mlcrWmWjw/wlKT+VDq2wMvfPJU= -github.com/hashicorp/go-sockaddr v1.0.5/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= -github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= -github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.1-vault-5 
h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/vault/api v1.10.0 h1:/US7sIjWN6Imp4o/Rj1Ce2Nr5bki/AXi9vAW3p2tOJQ= -github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/hashicorp/vault/api v1.16.0 h1:nbEYGJiAPGzT9U4oWgaaB0g+Rj8E59QuHKyA5LhwQN4= +github.com/hashicorp/vault/api v1.16.0/go.mod h1:KhuUhzOD8lDSk29AtzNjgAu2kxRA9jL9NAbkFlqvkBA= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= -github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/in-toto/attestation v1.1.1 h1:QD3d+oATQ0dFsWoNh5oT0udQ3tUrOsZZ0Fc3tSgWbzI= 
+github.com/in-toto/attestation v1.1.1/go.mod h1:Dcq1zVwA2V7Qin8I7rgOi+i837wEf/mOZwRm047Sjys= github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= +github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= 
+github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= +github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= -github.com/jellydator/ttlcache/v3 v3.1.1 h1:RCgYJqo3jgvhl+fEWvjNW8thxGWsgxi+TPhRir1Y9y8= -github.com/jellydator/ttlcache/v3 v3.1.1/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= 
+github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= -github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= -github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -610,23 +682,21 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= -github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kevinburke/ssh_config v1.2.0 
h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= +github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -639,145 +709,142 @@ github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 h1:WGrKdjHtWC67RX96eTkYD2f53NDHhrq/7robWTAfk4s= -github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491/go.mod h1:o158RFmdEbYyIZmXAbrvmJWesbyxlLKee6X64VPVuOc= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ= +github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod 
h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= -github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= -github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= -github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= -github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 
-github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= -github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= +github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= -github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= -github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod 
h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY= +github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw= -github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNdV6tkKhRtbs= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/minio/minio-go/v7 v7.0.94 h1:1ZoksIKPyaSt64AVOyaQvhDOgVC3MfZsWM6mZXRUGtM= +github.com/minio/minio-go/v7 v7.0.94/go.mod h1:71t2CqDt3ThzESgZUlU1rBN54mksGGlkLcFgguDnnAc= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.1 
h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= -github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= -github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= 
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= +github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= 
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mozillazg/docker-credential-acr-helper v0.3.0 h1:DVWFZ3/O8BP6Ue3iS/Olw+G07u1hCq1EOVCDZZjCIBI= -github.com/mozillazg/docker-credential-acr-helper v0.3.0/go.mod h1:cZlu3tof523ujmLuiNUb6JsjtHcNA70u1jitrrdnuyA= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/mozillazg/docker-credential-acr-helper v0.4.0 h1:Uoh3Z9CcpEDnLiozDx+D7oDgRq7X+R296vAqAumnOcw= +github.com/mozillazg/docker-credential-acr-helper v0.4.0/go.mod h1:2kiicb3OlPytmlNC9XGkLvVC+f0qTiJw3f/mhmeeQBg= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/notaryproject/notation-core-go v1.3.0 h1:mWJaw1QBpBxpjLSiKOjzbZvB+xh2Abzk14FHWQ+9Kfs= 
+github.com/notaryproject/notation-core-go v1.3.0/go.mod h1:hzvEOit5lXfNATGNBT8UQRx2J6Fiw/dq/78TQL8aE64= +github.com/notaryproject/notation-go v1.3.2 h1:4223iLXOHhEV7ZPzIUJEwwMkhlgzoYFCsMJvSH1Chb8= +github.com/notaryproject/notation-go v1.3.2/go.mod h1:/1kuq5WuLF6Gaer5re0Z6HlkQRlKYO4EbWWT/L7J1Uw= +github.com/notaryproject/notation-plugin-framework-go v1.0.0 h1:6Qzr7DGXoCgXEQN+1gTZWuJAZvxh3p8Lryjn5FaLzi4= +github.com/notaryproject/notation-plugin-framework-go v1.0.0/go.mod h1:RqWSrTOtEASCrGOEffq0n8pSg2KOgKYiWqFWczRSics= +github.com/notaryproject/tspclient-go v1.0.0 h1:AwQ4x0gX8IHnyiZB1tggpn5NFqHpTEm1SDX8YNv4Dg4= +github.com/notaryproject/tspclient-go v1.0.0/go.mod h1:LGyA/6Kwd2FlM0uk8Vc5il3j0CddbWSHBj/4kxQDbjs= github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM= -github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60= +github.com/oleiade/reflections 
v1.1.0 h1:D+I/UsXQB4esMathlt0kkZRJZdUDmhv5zGi/HOwYTWo= +github.com/oleiade/reflections v1.1.0/go.mod h1:mCxx0QseeVCHs5Um5HhJeCKVC7AwS8kO67tky4rdisA= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw= +github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= -github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/open-policy-agent/opa 
v1.5.1 h1:LTxxBJusMVjfs67W4FoRcnMfXADIGFMzpqnfk6D08Cg= +github.com/open-policy-agent/opa v1.5.1/go.mod h1:bYbS7u+uhTI+cxHQIpzvr5hxX0hV7urWtY+38ZtjMgk= github.com/opencontainers/go-digest v1.0.1-0.20220411205349-bde1400a84be h1:f2PlhC9pm5sqpBZFvnAoKj+KzXRzbjFMA+TqXfJdgho= github.com/opencontainers/go-digest v1.0.1-0.20220411205349-bde1400a84be/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/go-digest/blake3 v0.0.0-20231025023718-d50d2fec9c98 h1:LTxrNWOPwquJy9Cu3oz6QHJIO5M5gNyOZtSybXdyLA4= -github.com/opencontainers/go-digest/blake3 v0.0.0-20231025023718-d50d2fec9c98/go.mod h1:kqQaIc6bZstKgnGpL7GD5dWoLKbA6mH1Y9ULjGImBnM= -github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= -github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= -github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= -github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opencontainers/go-digest/blake3 v0.0.0-20250813155314-89707e38ad1a h1:IAncDmJeD90l6+YR1Gf6r0HrmnRmOatzPfUpMS80ZTI= +github.com/opencontainers/go-digest/blake3 v0.0.0-20250813155314-89707e38ad1a/go.mod h1:kqQaIc6bZstKgnGpL7GD5dWoLKbA6mH1Y9ULjGImBnM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opencontainers/runc v1.2.4 
h1:yWFgLkghp71D76Fa0l349yAl5g4Gse7DPYNlvkQ9Eiw= +github.com/opencontainers/runc v1.2.4/go.mod h1:nSxcWUydXrsBZVYNSkTjoQ/N6rcyTtn+1SD5D4+kRIM= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= -github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= -github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= -github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= -github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= -github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= +github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw= +github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE= +github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= +github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= +github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= +github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= 
+github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/pjbgf/sha1cd v0.4.0 h1:NXzbL1RvjTUi6kgYZCX3fPwwl27Q1LJndxtUDVfJGRY= +github.com/pjbgf/sha1cd v0.4.0/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -786,118 +853,121 @@ github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjz github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/otlptranslator v0.0.2 h1:+1CdeLVrRQ6Psmhnobldo0kTp96Rj80DRXRd5OSnMEQ= +github.com/prometheus/otlptranslator v0.0.2/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod 
h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/protocolbuffers/txtpbfmt v0.0.0-20241112170944-20d2c9ebc01d h1:HWfigq7lB31IeJL8iy7jkUmU/PG1Sr8jVGhS749dbUA= +github.com/protocolbuffers/txtpbfmt v0.0.0-20241112170944-20d2c9ebc01d/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= -github.com/redis/go-redis/v9 v9.3.0 h1:RiVDjmig62jIWp7Kk4XVLs0hzV6pI3PyTnnL0cnn0u0= -github.com/redis/go-redis/v9 v9.3.0/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M= +github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI= +github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= -github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= 
-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzFtS0= -github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= +github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod 
h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= +github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= -github.com/sassoftware/relic/v7 v7.6.1 h1:O5s8ewCgq5QYNpv45dK4u6IpBmDM9RIcsbf/G1uXepQ= -github.com/sassoftware/relic/v7 v7.6.1/go.mod h1:NxwtWxWxlUa9as2qZi635Ye6bBT/tGnMALLq7dSfOOU= -github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= -github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= +github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= +github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= +github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= +github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= 
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sigstore/cosign/v2 v2.2.3 h1:WX7yawI+EXu9h7S5bZsfYCbB9XW6Jc43ctKy/NoOSiA= -github.com/sigstore/cosign/v2 v2.2.3/go.mod h1:WpMn4MBt0cI23GdHsePwO4NxhX1FOz1ITGB3ALUjFaI= -github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ= -github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og= -github.com/sigstore/rekor v1.3.4 h1:RGIia1iOZU7fOiiP2UY/WFYhhp50S5aUm7YrM8aiA6E= -github.com/sigstore/rekor v1.3.4/go.mod h1:1GubPVO2yO+K0m0wt/3SHFqnilr/hWbsjSOe7Vzxrlg= -github.com/sigstore/sigstore v1.8.1 h1:mAVposMb14oplk2h/bayPmIVdzbq2IhCgy4g6R0ZSjo= 
-github.com/sigstore/sigstore v1.8.1/go.mod h1:02SL1158BSj15bZyOFz7m+/nJzLZfFd9A8ab3Kz7w/E= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.1 h1:rEDdUefulkIQaMJyzLwtgPDLNXBIltBABiFYfb0YmgQ= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.1/go.mod h1:RCdYCc1IxCYWzh2IdzdA6Yf7JIY0cMRqH08fpQYechw= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.1 h1:DvRWG99QGWZC5mp42SEde2Xke/Q384Idnj2da7yB+Mk= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.1/go.mod h1:s13mo3a0UCQS3+PAUUZfvKe48sMDMsHk2GE1b2YfPcU= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.1 h1:lwdRsJv1UbBemuk7w5YfXAQilQxMoFevrzamdPbG0wY= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.1/go.mod h1:2OaSQ80EcdyVRSQ3T4d1lsc6Scopblsiq8U2AEk5K1A= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.1 h1:9Ki0qudKpc1FQdef7xHO2bkLyTuw+qNUpWRzjBEmF4c= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.1/go.mod h1:nhIgyu4YwwNgalIwTGsoAzam16jjAn3ADRSWKbWPwGI= -github.com/sigstore/timestamp-authority v1.2.1 h1:j9RmqSAdvKgSofeltPO4x7d+1M3AXaROBzUJ+AA7L5Q= -github.com/sigstore/timestamp-authority v1.2.1/go.mod h1:Ce+vWWEf0QaKLY2u6mpwEJbmYXEVeOfUk4fQ69kE6ck= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sigstore/cosign/v2 v2.5.2 h1:i5Dw7M7W9OcWgyiknJB8vNx/07KweninBDxRoHPxqHE= +github.com/sigstore/cosign/v2 v2.5.2/go.mod h1:CYlcgkPQJZ5pvWlbl7mOfO/Q1S1N7r4tpdYCtFwhXco= +github.com/sigstore/fulcio v1.7.1 h1:RcoW20Nz49IGeZyu3y9QYhyyV3ZKQ85T+FXPKkvE+aQ= +github.com/sigstore/fulcio v1.7.1/go.mod h1:7lYY+hsd8Dt+IvKQRC+KEhWpCZ/GlmNvwIa5JhypMS8= 
+github.com/sigstore/protobuf-specs v0.4.3 h1:kRgJ+ciznipH9xhrkAbAEHuuxD3GhYnGC873gZpjJT4= +github.com/sigstore/protobuf-specs v0.4.3/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.3.10 h1:/mSvRo4MZ/59ECIlARhyykAlQlkmeAQpvBPlmJtZOCU= +github.com/sigstore/rekor v1.3.10/go.mod h1:JvryKJ40O0XA48MdzYUPu0y4fyvqt0C4iSY7ri9iu3A= +github.com/sigstore/sigstore v1.9.5 h1:Wm1LT9yF4LhQdEMy5A2JeGRHTrAWGjT3ubE5JUSrGVU= +github.com/sigstore/sigstore v1.9.5/go.mod h1:VtxgvGqCmEZN9X2zhFSOkfXxvKUjpy8RpUW39oCtoII= +github.com/sigstore/sigstore-go v1.0.0 h1:4N07S2zLxf09nTRwaPKyAxbKzpM8WJYUS8lWWaYxneU= +github.com/sigstore/sigstore-go v1.0.0/go.mod h1:UYsZ/XHE4eltv1o1Lu+n6poW1Z5to3f0+emvfXNxIN8= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.9.5 h1:qp2VFyKuFQvTGmZwk5Q7m5nE4NwnF9tHwkyz0gtWAck= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.9.5/go.mod h1:DKlQjjr+GsWljEYPycI0Sf8URLCk4EbGA9qYjF47j4g= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.9.5 h1:CRZcdYn5AOptStsLRAAACudAVmb1qUbhMlzrvm7ju3o= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.9.5/go.mod h1:b9rFfITq2fp1M3oJmq6lFFhSrAz5vOEJH1qzbMsZWN4= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.9.5 h1:7U0GsO0UGG1PdtgS6wBkRC0sMgq7BRVaFlPRwN4m1Qg= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.9.5/go.mod h1:/2qrI0nnCy/DTIPOMFaZlFnNPWEn5UeS70P37XEM88o= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.9.5 h1:S2ukEfN1orLKw2wEQIUHDDlzk0YcylhcheeZ5TGk8LI= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.9.5/go.mod h1:m7sQxVJmDa+rsmS1m6biQxaLX83pzNS7ThUEyjOqkCU= +github.com/sigstore/timestamp-authority v1.2.8 h1:BEV3fkphwU4zBp3allFAhCqQb99HkiyCXB853RIwuEE= 
+github.com/sigstore/timestamp-authority v1.2.8/go.mod h1:G2/0hAZmLPnevEwT1S9IvtNHUm9Ktzvso6xuRhl94ZY= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= +github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= -github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY= -github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/sourcegraph/conc v0.3.0 
h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= -github.com/spiffe/go-spiffe/v2 v2.1.7 h1:VUkM1yIyg/x8X7u1uXqSRVRCdMdfRIEdFBzpqoeASGk= -github.com/spiffe/go-spiffe/v2 v2.1.7/go.mod h1:QJDGdhXllxjxvd5B+2XnhhXB/+rC8gr+lNrtOryiWeE= +github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod 
h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= +github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= -github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -909,17 +979,30 @@ 
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM= +github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= github.com/theupdateframework/go-tuf 
v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= +github.com/theupdateframework/go-tuf/v2 v2.1.1 h1:OWcoHItwsGO+7m0wLa7FDWPR4oB1cj0zOr1kosE4G+I= +github.com/theupdateframework/go-tuf/v2 v2.1.1/go.mod h1:V675cQGhZONR0OGQ8r1feO0uwtsTBYPDWHzAAPn5rjE= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= +github.com/tink-crypto/tink-go/v2 v2.4.0 h1:8VPZeZI4EeZ8P/vB6SIkhlStrJfivTJn+cQ4dtyHNh0= +github.com/tink-crypto/tink-go/v2 v2.4.0/go.mod h1:l//evrF2Y3MjdbpNDNGnKgCpo5zSmvUvnQ4MU+yE2sw= +github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww= +github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= @@ -927,18 +1010,16 @@ github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho= github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE= 
github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= -github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/xanzy/go-gitlab v0.96.0 h1:LGkZ+wSNMRtHIBaYE4Hq3dZVjprwHv3Y1+rhKU3WETs= -github.com/xanzy/go-gitlab v0.96.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vektah/gqlparser/v2 v2.5.26 h1:REqqFkO8+SOEgZHR/eHScjjVjGS8Nk3RMO/juiTobN4= +github.com/vektah/gqlparser/v2 v2.5.26/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo= +github.com/veraison/go-cose v1.3.0 h1:2/H5w8kdSpQJyVtIhx8gmwPJ2uSz1PkyWFx0idbd7rk= +github.com/veraison/go-cose v1.3.0/go.mod h1:df09OV91aHoQWLmy1KsDdYiagtXgyAwAl8vFeFn1gMc= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= 
-github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -948,102 +1029,126 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= +github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= -github.com/ysmood/got v0.34.1 h1:IrV2uWLs45VXNvZqhJ6g2nIhY+pgIG1CUoOcqfXFl1s= 
-github.com/ysmood/got v0.34.1/go.mod h1:yddyjq/PmAf08RMLSwDjPyCvHvYed+WjHnQxpH851LM= +github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= +github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= -github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak= -github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= +github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= +github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zalando/go-keyring v0.2.2 h1:f0xmpYiSrHtSNAVgwip93Cg8tuF45HJM6rHq/A5RI/4= -github.com/zalando/go-keyring v0.2.2/go.mod h1:sI3evg9Wvpw3+n4SqplGSJUMwtDeROfD4nsFz4z9PG0= +github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= +github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= 
github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= -github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs= -github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= -go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= -go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/exporters/autoexport v0.46.1 h1:ysCfPZB9AjUlMa1UHYup3c9dAOCMQX/6sxSfPBUoxHw= -go.opentelemetry.io/contrib/exporters/autoexport v0.46.1/go.mod h1:ha0aiYm+DOPsLHjh0zoQ8W8sLT+LJ58J3j47lGpSLrU= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= -go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= -go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 
h1:jd0+5t/YynESZqsSyPz+7PAFdEop0dlN0+PkyHYo8oI= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 h1:bflGWrfYyuulcdxf14V6n9+CoQcu5SAAdHmDPAJnlps= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0/go.mod h1:qcTO4xHAxZLaLxPd60TdE88rxtItPHgHWqOhOGRr0as= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= -go.opentelemetry.io/otel/exporters/prometheus v0.44.0 h1:08qeJgaPC0YEBu2PQMbqU3rogTlyzpjhCI2b58Yn00w= -go.opentelemetry.io/otel/exporters/prometheus v0.44.0/go.mod h1:ERL2uIeBtg4TxZdojHUwzZfIFlUIjZtxubT5p4h1Gjg= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0 h1:dEZWPjVN22urgYCza3PXRUGEyCB++y1sAqm6guWFesk= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0/go.mod h1:sTt30Evb7hJB/gEk27qLb1+l9n4Tb8HvHkR0Wx3S6CU= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 h1:VhlEQAPp9R1ktYfrPk5SOryw1e9LDDTZCbIPFrho0ec= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0/go.mod h1:kB3ufRbfU+CQ4MlUcqtW8Z7YEOBeK2DJ6CmR5rYYF3E= -go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= -go.opentelemetry.io/otel/metric v1.22.0/go.mod 
h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= -go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= -go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= -go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0= -go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q= -go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= -go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.starlark.net v0.0.0-20231121155337-90ade8b19d09 h1:hzy3LFnSN8kuQK8h9tHl4ndF6UruMj47OqwqsS+/Ai4= -go.starlark.net v0.0.0-20231121155337-90ade8b19d09/go.mod h1:LcLNIzVOMp4oV+uusnpk+VU+SzXaJakUuBjoCSWH5dM= -go.step.sm/crypto v0.42.1 h1:OmwHm3GJO8S4VGWL3k4+I+Q4P/F2s+j8msvTyGnh1Vg= -go.step.sm/crypto v0.42.1/go.mod h1:yNcTLFQBnYCA75fC5bklBoTAT7y0dRZsB1TkinB8JMs= +gitlab.com/gitlab-org/api/client-go v0.130.1 h1:1xF5C5Zq3sFeNg3PzS2z63oqrxifne3n/OnbI7nptRc= +gitlab.com/gitlab-org/api/client-go v0.130.1/go.mod h1:ZhSxLAWadqP6J9lMh40IAZOlOxBLPRh7yFOXR/bMJWM= +go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= +go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 h1:/Rij/t18Y7rUayNg7Id6rPrEnHgorxYabm2E6wUdPP4= +go.opentelemetry.io/contrib/bridges/prometheus v0.63.0/go.mod h1:AdyDPn6pkbkt2w01n3BubRVk7xAsCRq1Yg1mpfyA/0E= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0 
h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= +go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 h1:NLnZybb9KkfMXPwZhd5diBYJoVxiO9Qa06dacEA7ySY= +go.opentelemetry.io/contrib/exporters/autoexport v0.63.0/go.mod h1:OvRg7gm5WRSCtxzGSsrFHbDLToYlStHNZQ+iPNIyD6g= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 h1:OMqPldHt79PqWKOMYIAQs3CxAi7RLgPxwfFSwr4ZxtM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0/go.mod h1:1biG4qiqTxKiUCtoWDPpL3fB3KxVwCiGw81j3nKMuHE= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 h1:QQqYw3lkrzwVsoEX0w//EhH/TCnpRdEenKBOOEIMjWc= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0/go.mod h1:gSVQcr17jk2ig4jqJ2DX30IdWH251JcNAecvrqTxH1s= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 h1:Oe2z/BCg5q7k4iXC3cqJxKYg0ieRiOqF0cecFYdPTwk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp 
v1.38.0/go.mod h1:ZQM5lAJpOsKnYagGg/zV2krVqTtaVdYdDkhMoX6Oalg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0 h1:cGtQxGvZbnrWdC2GyjZi0PDKVSLWP/Jocix3QWfXtbo= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0/go.mod h1:hkd1EekxNo69PTV4OWFGZcKQiIqg0RfuWExcPKFvepk= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 h1:B/g+qde6Mkzxbry5ZZag0l7QrQBCtVm7lVjaLgmpje8= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0/go.mod h1:mOJK8eMmgW6ocDJn6Bn11CcZ05gi3P8GylBXEkZtbgA= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 h1:wm/Q0GAAykXv83wzcKzGGqAnnfLFyFe7RslekZuv+VI= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0/go.mod h1:ra3Pa40+oKjvYh+ZD3EdxFZZB0xdMfuileHAm4nNN7w= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE= +go.opentelemetry.io/otel/log v0.14.0 h1:2rzJ+pOAZ8qmZ3DDHg73NEKzSZkhkGIua9gXtxNGgrM= +go.opentelemetry.io/otel/log v0.14.0/go.mod h1:5jRG92fEAgx0SU/vFPxmJvhIuDU9E1SUnEQrMlJpOno= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod 
h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/log v0.14.0 h1:JU/U3O7N6fsAXj0+CXz21Czg532dW2V4gG1HE/e8Zrg= +go.opentelemetry.io/otel/sdk/log v0.14.0/go.mod h1:imQvII+0ZylXfKU7/wtOND8Hn4OpT3YUoIgqJVksUkM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0 h1:Ijbtz+JKXl8T2MngiwqBlPaHqc4YCaP/i13Qrow6gAM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0/go.mod h1:dCU8aEL6q+L9cYTqcVOk8rM9Tp8WdnHOPLiBgp0SGOA= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.8.0 h1:fRAZQDcAFHySxpJ1TwlA1cJ4tvcrw7nXl9xWWC8N5CE= +go.opentelemetry.io/proto/otlp v1.8.0/go.mod h1:tIeYOeNBU4cvmPqpaji1P+KbB4Oloai8wN4rWzRrFF0= +go.step.sm/crypto v0.66.0 h1:9TW6BEguOtcS9NIjja9bDQ+j8OjhenU/F6lJfHjbXNU= +go.step.sm/crypto v0.66.0/go.mod h1:anqGyvO/Px05D1mznHq4/a9wwP1I1DmMZvk+TWX5Dzo= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 
h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto 
v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= 
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1051,8 +1156,12 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1062,31 +1171,33 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod 
h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1096,22 +1207,24 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.3.0/go.mod 
h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1123,55 +1236,62 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term 
v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= 
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1179,65 +1299,57 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= -gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/api v0.161.0 h1:oYzk/bs26WN10AV7iU7MVJVXBH8oCPS2hHyBiEeFoSU= -google.golang.org/api v0.161.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw= +gomodules.xyz/jsonpatch/v2 v2.5.0 
h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= +gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.248.0 h1:hUotakSkcwGdYUqzCRc5yGYsg4wXxpkKlW5ryVqvC1Y= +google.golang.org/api v0.248.0/go.mod h1:yAFUAF56Li7IuIQbTFoLwXTCI6XCFKueOlS7S9e4F9k= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0= -google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM= -google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 
h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod 
h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v5 v5.7.0 h1:dGKGylPlZ/jus2g1YqhhyzfH0gPy2R8/MYUpW/OslTY= -gopkg.in/evanphx/json-patch.v5 v5.7.0/go.mod h1:/kvTRh1TVm5wuM6OkHxqXtE/1nUZZpihg29RtuIyfvk= +gopkg.in/evanphx/json-patch.v4 
v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U= -gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -1257,51 +1369,51 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -helm.sh/helm/v3 v3.13.3 h1:0zPEdGqHcubehJHP9emCtzRmu8oYsJFRrlVF3TFj8xY= -helm.sh/helm/v3 v3.13.3/go.mod h1:3OKO33yI3p4YEXtTITN2+4oScsHeQe71KuzhlZ+aPfg= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +helm.sh/helm/v3 v3.19.0 h1:krVyCGa8fa/wzTZgqw0DUiXuRT5BPdeqE/sQXujQ22k= +helm.sh/helm/v3 v3.19.0/go.mod h1:Lk/SfzN0w3a3C3o+TdAKrLwJ0wcZ//t1/SDXAvfgDdc= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
-k8s.io/api v0.28.6 h1:yy6u9CuIhmg55YvF/BavPBBXB+5QicB64njJXxVnzLo= -k8s.io/api v0.28.6/go.mod h1:AM6Ys6g9MY3dl/XNaNfg/GePI0FT7WBGu8efU/lirAo= -k8s.io/apiextensions-apiserver v0.28.6 h1:myB3iG/3v3jqCg28JDbOefu4sH2/erNEXgytRzJKBOo= -k8s.io/apiextensions-apiserver v0.28.6/go.mod h1:qlp6xRKBgyRhe5AYc81TQpLx4kLNK8/sGQUOwMkVjRk= -k8s.io/apimachinery v0.28.6 h1:RsTeR4z6S07srPg6XYrwXpTJVMXsjPXn0ODakMytSW0= -k8s.io/apimachinery v0.28.6/go.mod h1:QFNX/kCl/EMT2WTSz8k4WLCv2XnkOLMaL8GAVRMdpsA= -k8s.io/apiserver v0.28.6 h1:SfS5v4I5UGvh0q/1rzvNwLFsK+r7YzcsixnUc0NwoEk= -k8s.io/apiserver v0.28.6/go.mod h1:8n0aerS3kPm9usyB8B+an6/BZ5+Fa9fNqlASFdDDVwk= -k8s.io/cli-runtime v0.28.6 h1:bDH2+ZbHBK3NORGmIygj/zWOkVd/hGWg9RqAa5c/Ev0= -k8s.io/cli-runtime v0.28.6/go.mod h1:KFk67rlb7Pxh15uLbYGBUlW7ZUcpl7IM1GnHtskrcWA= -k8s.io/client-go v0.28.6 h1:Gge6ziyIdafRchfoBKcpaARuz7jfrK1R1azuwORIsQI= -k8s.io/client-go v0.28.6/go.mod h1:+nu0Yp21Oeo/cBCsprNVXB2BfJTV51lFfe5tXl2rUL8= -k8s.io/component-base v0.28.6 h1:G4T8VrcQ7xZou3by/fY5NU5mfxOBlWaivS2lPrEltAo= -k8s.io/component-base v0.28.6/go.mod h1:Dg62OOG3ALu2P4nAG00UdsuHoNLQJ5VsUZKQlLDcS+E= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20231206194836-bf4651e18aa8 h1:vzKzxN5uyJZLY8HL1/OovW7BJefnsBIWt8T7Gjh2boQ= -k8s.io/kube-openapi v0.0.0-20231206194836-bf4651e18aa8/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/kubectl v0.28.6 h1:46O3gGJYlpqy7wtwYlggieemyIcuZqmflnQVDci3MgY= -k8s.io/kubectl v0.28.6/go.mod h1:FS5ugZhi3kywpMQSCnp8MN+gctdFHJACzC6mH3fZ6lc= -k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= -k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= -oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= 
-sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= -sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.16.0 h1:/zAR4FOQDCkgSDmVzV2uiFbuy9bhu3jEzthrHCuvm1g= -sigs.k8s.io/kustomize/api v0.16.0/go.mod h1:MnFZ7IP2YqVyVwMWoRxPtgl/5hpA+eCCrQR/866cm5c= -sigs.k8s.io/kustomize/kyaml v0.16.0 h1:6J33uKSoATlKZH16unr2XOhDI+otoe2sR3M8PDzW3K0= -sigs.k8s.io/kustomize/kyaml v0.16.0/go.mod h1:xOK/7i+vmE14N2FdFyugIshB8eF6ALpy7jI87Q2nRh4= -sigs.k8s.io/release-utils v0.7.7 h1:JKDOvhCk6zW8ipEOkpTGDH/mW3TI+XqtPp16aaQ79FU= -sigs.k8s.io/release-utils v0.7.7/go.mod h1:iU7DGVNi3umZJ8q6aHyUFzsDUIaYwNnNKGHo3YE5E3s= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -software.sslmate.com/src/go-pkcs12 v0.2.0 h1:nlFkj7bTysH6VkC4fGphtjXRbezREPgrHuJG20hBGPE= -software.sslmate.com/src/go-pkcs12 v0.2.0/go.mod h1:23rNcYsMabIc1otwLpTkCCPwUq6kQsTyowttG/as0kQ= +k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= +k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= +k8s.io/apiextensions-apiserver v0.34.0 h1:B3hiB32jV7BcyKcMU5fDaDxk882YrJ1KU+ZSkA9Qxoc= +k8s.io/apiextensions-apiserver v0.34.0/go.mod h1:hLI4GxE1BDBy9adJKxUxCEHBGZtGfIg98Q+JmTD7+g0= +k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= +k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.0 
h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg= +k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ= +k8s.io/cli-runtime v0.34.0 h1:N2/rUlJg6TMEBgtQ3SDRJwa8XyKUizwjlOknT1mB2Cw= +k8s.io/cli-runtime v0.34.0/go.mod h1:t/skRecS73Piv+J+FmWIQA2N2/rDjdYSQzEE67LUUs8= +k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo= +k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY= +k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8= +k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs= +k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= +oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= +sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg= +sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= +sigs.k8s.io/kustomize/api 
v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= +sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= +sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/release-utils v0.11.1 h1:hzvXGpHgHJfLOJB6TRuu14bzWc3XEglHmXHJqwClSZE= +sigs.k8s.io/release-utils v0.11.1/go.mod h1:ybR2V/uQAOGxYfzYtBenSYeXWkBGNP2qnEiX77ACtpc= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index e4b53a5f0..f186b9dd3 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,5 +1,5 @@ /* -Copyright 2023 The Flux authors +Copyright 2025 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/hack/ci/e2e.sh b/hack/ci/e2e.sh index ad4aaad7a..b00eda00c 100755 --- a/hack/ci/e2e.sh +++ b/hack/ci/e2e.sh @@ -144,6 +144,12 @@ kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/helmchart-from-oc kubectl -n source-system wait helmchart/podinfo --for=condition=ready --timeout=1m kubectl -n source-system wait helmchart/podinfo-keyless --for=condition=ready --timeout=1m +kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/helmchart-from-oci/notation.yaml" +curl -sSLo notation.crt https://raw.githubusercontent.com/stefanprodan/podinfo/master/.notation/notation.crt +curl -sSLo trustpolicy.json https://raw.githubusercontent.com/stefanprodan/podinfo/master/.notation/trustpolicy.json +kubectl -n source-system create secret generic notation-config --from-file=notation.crt --from-file=trustpolicy.json --dry-run=client -o yaml | kubectl apply -f - +kubectl -n source-system wait helmchart/podinfo-notation --for=condition=ready --timeout=1m + echo "Run OCIRepository verify tests" kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/ocirepository/signed-with-key.yaml" kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/ocirepository/signed-with-keyless.yaml" @@ -152,3 +158,6 @@ kubectl -n source-system create secret generic cosign-key --from-file=cosign.pub kubectl -n source-system wait ocirepository/podinfo-deploy-signed-with-key --for=condition=ready --timeout=1m kubectl -n source-system wait ocirepository/podinfo-deploy-signed-with-keyless --for=condition=ready --timeout=1m + +kubectl -n source-system apply -f "${ROOT_DIR}/config/testdata/ocirepository/signed-with-notation.yaml" +kubectl -n source-system wait ocirepository/podinfo-deploy-signed-with-notation --for=condition=ready --timeout=1m diff --git a/pkg/azure/blob.go b/internal/bucket/azure/blob.go similarity index 83% rename from pkg/azure/blob.go rename to internal/bucket/azure/blob.go index 940f429b7..5bf814b7d 100644 --- a/pkg/azure/blob.go +++ 
b/internal/bucket/azure/blob.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "io" + "net/http" "net/url" "os" "path/filepath" @@ -36,9 +37,11 @@ import ( corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" + "github.com/fluxcd/pkg/auth" + azureauth "github.com/fluxcd/pkg/auth/azure" "github.com/fluxcd/pkg/masktoken" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) var ( @@ -64,6 +67,56 @@ type BlobClient struct { *azblob.Client } +// Option configures the BlobClient. +type Option func(*options) + +// WithSecret sets the Secret to use for the BlobClient. +func WithSecret(secret *corev1.Secret) Option { + return func(o *options) { + o.secret = secret + } +} + +// WithProxyURL sets the proxy URL to use for the BlobClient. +func WithProxyURL(proxyURL *url.URL) Option { + return func(o *options) { + o.proxyURL = proxyURL + } +} + +type options struct { + secret *corev1.Secret + proxyURL *url.URL + withoutCredentials bool + withoutRetries bool + authOpts []auth.Option +} + +// withoutCredentials forces the BlobClient to not use any credentials. +// This is a test-only option useful for testing the client with HTTP +// endpoints (without TLS) alongside all the other options unrelated to +// credentials. +func withoutCredentials() Option { + return func(o *options) { + o.withoutCredentials = true + } +} + +// withoutRetries sets the BlobClient to not retry requests. +// This is a test-only option useful for testing connection errors. +func withoutRetries() Option { + return func(o *options) { + o.withoutRetries = true + } +} + +// WithAuth sets the auth options for workload identity authentication. +func WithAuth(authOpts ...auth.Option) Option { + return func(o *options) { + o.authOpts = authOpts + } +} + // NewClient creates a new Azure Blob storage client. 
// The credential config on the client is set based on the data from the // Bucket and Secret. It detects credentials in the Secret in the following @@ -87,56 +140,80 @@ type BlobClient struct { // // If no credentials are found, and the azidentity.ChainedTokenCredential can // not be established. A simple client without credentials is returned. -func NewClient(obj *sourcev1.Bucket, secret *corev1.Secret) (c *BlobClient, err error) { +func NewClient(ctx context.Context, obj *sourcev1.Bucket, opts ...Option) (c *BlobClient, err error) { c = &BlobClient{} + var o options + for _, opt := range opts { + opt(&o) + } + + clientOpts := &azblob.ClientOptions{} + + if o.proxyURL != nil { + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.Proxy = http.ProxyURL(o.proxyURL) + clientOpts.ClientOptions.Transport = &http.Client{Transport: transport} + } + + if o.withoutRetries { + clientOpts.ClientOptions.Retry.ShouldRetry = func(resp *http.Response, err error) bool { + return false + } + } + + if o.withoutCredentials { + c.Client, err = azblob.NewClientWithNoCredential(obj.Spec.Endpoint, clientOpts) + return + } + var token azcore.TokenCredential - if secret != nil && len(secret.Data) > 0 { + if o.secret != nil && len(o.secret.Data) > 0 { // Attempt AAD Token Credential options first. - if token, err = tokenCredentialFromSecret(secret); err != nil { - err = fmt.Errorf("failed to create token credential from '%s' Secret: %w", secret.Name, err) + if token, err = tokenCredentialFromSecret(o.secret); err != nil { + err = fmt.Errorf("failed to create token credential from '%s' Secret: %w", o.secret.Name, err) return } if token != nil { - c.Client, err = azblob.NewClient(obj.Spec.Endpoint, token, nil) + c.Client, err = azblob.NewClient(obj.Spec.Endpoint, token, clientOpts) return } // Fallback to Shared Key Credential. 
var cred *azblob.SharedKeyCredential - if cred, err = sharedCredentialFromSecret(obj.Spec.Endpoint, secret); err != nil { + if cred, err = sharedCredentialFromSecret(obj.Spec.Endpoint, o.secret); err != nil { return } if cred != nil { - c.Client, err = azblob.NewClientWithSharedKeyCredential(obj.Spec.Endpoint, cred, &azblob.ClientOptions{}) + c.Client, err = azblob.NewClientWithSharedKeyCredential(obj.Spec.Endpoint, cred, clientOpts) return } var fullPath string - if fullPath, err = sasTokenFromSecret(obj.Spec.Endpoint, secret); err != nil { + if fullPath, err = sasTokenFromSecret(obj.Spec.Endpoint, o.secret); err != nil { return } - c.Client, err = azblob.NewClientWithNoCredential(fullPath, &azblob.ClientOptions{}) + c.Client, err = azblob.NewClientWithNoCredential(fullPath, clientOpts) return } // Compose token chain based on environment. // This functions as a replacement for azidentity.NewDefaultAzureCredential // to not shell out. - token, err = chainCredentialWithSecret(secret) + token, err = chainCredentialWithSecret(ctx, o.secret, o.authOpts...) if err != nil { err = fmt.Errorf("failed to create environment credential chain: %w", err) return nil, err } if token != nil { - c.Client, err = azblob.NewClient(obj.Spec.Endpoint, token, nil) + c.Client, err = azblob.NewClient(obj.Spec.Endpoint, token, clientOpts) return } // Fallback to simple client. - c.Client, err = azblob.NewClientWithNoCredential(obj.Spec.Endpoint, nil) + c.Client, err = azblob.NewClientWithNoCredential(obj.Spec.Endpoint, clientOpts) return } @@ -403,7 +480,7 @@ func sasTokenFromSecret(ep string, secret *corev1.Secret) (string, error) { // - azidentity.ManagedIdentityCredential with defaults. // // If no valid token is created, it returns nil. 
-func chainCredentialWithSecret(secret *corev1.Secret) (azcore.TokenCredential, error) { +func chainCredentialWithSecret(ctx context.Context, secret *corev1.Secret, opts ...auth.Option) (azcore.TokenCredential, error) { var creds []azcore.TokenCredential credOpts := &azidentity.EnvironmentCredentialOptions{} @@ -416,28 +493,7 @@ func chainCredentialWithSecret(secret *corev1.Secret) (azcore.TokenCredential, e if token, _ := azidentity.NewEnvironmentCredential(credOpts); token != nil { creds = append(creds, token) } - if clientID := os.Getenv("AZURE_CLIENT_ID"); clientID != "" { - if file, ok := os.LookupEnv("AZURE_FEDERATED_TOKEN_FILE"); ok { - if _, ok := os.LookupEnv("AZURE_AUTHORITY_HOST"); ok { - if tenantID, ok := os.LookupEnv("AZURE_TENANT_ID"); ok { - if token, _ := azidentity.NewWorkloadIdentityCredential(&azidentity.WorkloadIdentityCredentialOptions{ - ClientID: clientID, - TenantID: tenantID, - TokenFilePath: file, - }); token != nil { - creds = append(creds, token) - } - } - } - } - - if token, _ := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{ - ID: azidentity.ClientID(clientID), - }); token != nil { - creds = append(creds, token) - } - } - if token, _ := azidentity.NewManagedIdentityCredential(nil); token != nil { + if token := azureauth.NewTokenCredential(ctx, opts...); token != nil { creds = append(creds, token) } diff --git a/pkg/azure/blob_integration_test.go b/internal/bucket/azure/blob_integration_test.go similarity index 92% rename from pkg/azure/blob_integration_test.go rename to internal/bucket/azure/blob_integration_test.go index c468e9fca..704b4c0c3 100644 --- a/pkg/azure/blob_integration_test.go +++ b/internal/bucket/azure/blob_integration_test.go @@ -44,7 +44,7 @@ import ( . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) var ( @@ -94,7 +94,7 @@ func TestMain(m *testing.M) { func TestBlobClient_BucketExists(t *testing.T) { g := NewWithT(t) - client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy()) + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) g.Expect(client).ToNot(BeNil()) @@ -120,7 +120,7 @@ func TestBlobClient_BucketExists(t *testing.T) { func TestBlobClient_BucketNotExists(t *testing.T) { g := NewWithT(t) - client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy()) + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) g.Expect(client).ToNot(BeNil()) @@ -140,7 +140,7 @@ func TestBlobClient_FGetObject(t *testing.T) { tempDir := t.TempDir() - client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy()) + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) g.Expect(client).ToNot(BeNil()) @@ -180,7 +180,7 @@ func TestBlobClientSASKey_FGetObject(t *testing.T) { tempDir := t.TempDir() // create a client with the shared key - client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy()) + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) g.Expect(client).ToNot(BeNil()) @@ -221,7 +221,7 @@ func TestBlobClientSASKey_FGetObject(t *testing.T) { }, } - sasKeyClient, err := NewClient(testBucket.DeepCopy(), testSASKeySecret.DeepCopy()) + sasKeyClient, err := NewClient(testBucket.DeepCopy(), WithSecret(testSASKeySecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) // Test if bucket and blob exists using sasKey. 
@@ -246,7 +246,7 @@ func TestBlobClientContainerSASKey_BucketExists(t *testing.T) { g := NewWithT(t) // create a client with the shared key - client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy()) + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) g.Expect(client).ToNot(BeNil()) @@ -286,7 +286,7 @@ func TestBlobClientContainerSASKey_BucketExists(t *testing.T) { }, } - sasKeyClient, err := NewClient(testBucket.DeepCopy(), testSASKeySecret.DeepCopy()) + sasKeyClient, err := NewClient(testBucket.DeepCopy(), WithSecret(testSASKeySecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) ctx, timeout = context.WithTimeout(context.Background(), testTimeout) @@ -308,7 +308,7 @@ func TestBlobClientContainerSASKey_BucketExists(t *testing.T) { func TestBlobClient_FGetObject_NotFoundErr(t *testing.T) { g := NewWithT(t) - client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy()) + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) g.Expect(client).ToNot(BeNil()) @@ -335,7 +335,7 @@ func TestBlobClient_FGetObject_NotFoundErr(t *testing.T) { func TestBlobClient_VisitObjects(t *testing.T) { g := NewWithT(t) - client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy()) + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) g.Expect(client).ToNot(BeNil()) @@ -361,7 +361,7 @@ func TestBlobClient_VisitObjects(t *testing.T) { // Visit objects. 
ctx, timeout = context.WithTimeout(context.Background(), testTimeout) defer timeout() - got := client.VisitObjects(ctx, testContainer, func(path, etag string) error { + got := client.VisitObjects(ctx, testContainer, "", func(path, etag string) error { visits[path] = etag return nil }) @@ -375,7 +375,7 @@ func TestBlobClient_VisitObjects(t *testing.T) { func TestBlobClient_VisitObjects_CallbackErr(t *testing.T) { g := NewWithT(t) - client, err := NewClient(testBucket.DeepCopy(), testSecret.DeepCopy()) + client, err := NewClient(testBucket.DeepCopy(), WithSecret(testSecret.DeepCopy())) g.Expect(err).ToNot(HaveOccurred()) g.Expect(client).ToNot(BeNil()) @@ -399,7 +399,7 @@ func TestBlobClient_VisitObjects_CallbackErr(t *testing.T) { ctx, timeout = context.WithTimeout(context.Background(), testTimeout) defer timeout() mockErr := fmt.Errorf("mock") - err = client.VisitObjects(ctx, testContainer, func(path, etag string) error { + err = client.VisitObjects(ctx, testContainer, "", func(path, etag string) error { return mockErr }) g.Expect(err).To(HaveOccurred()) diff --git a/pkg/azure/blob_test.go b/internal/bucket/azure/blob_test.go similarity index 81% rename from pkg/azure/blob_test.go rename to internal/bucket/azure/blob_test.go index 56a3ca0b9..83f17e900 100644 --- a/pkg/azure/blob_test.go +++ b/internal/bucket/azure/blob_test.go @@ -18,6 +18,7 @@ package azure import ( "bytes" + "context" "crypto/rand" "crypto/rsa" "crypto/x509" @@ -25,6 +26,7 @@ import ( "errors" "fmt" "math/big" + "net/http" "net/url" "testing" @@ -34,8 +36,96 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + testlistener "github.com/fluxcd/source-controller/tests/listener" + testproxy "github.com/fluxcd/source-controller/tests/proxy" ) +func TestNewClientAndBucketExistsWithProxy(t *testing.T) { + g := NewWithT(t) + + proxyAddr, _ := testproxy.New(t) + + // start mock bucket server + bucketListener, bucketAddr, _ := testlistener.New(t) + bucketEndpoint := fmt.Sprintf("http://%s", bucketAddr) + bucketHandler := http.NewServeMux() + bucketHandler.HandleFunc("GET /podinfo", func(w http.ResponseWriter, r *http.Request) { + // verify query params comp=list&maxresults=1&restype=container + q := r.URL.Query() + g.Expect(q.Get("comp")).To(Equal("list")) + g.Expect(q.Get("maxresults")).To(Equal("1")) + g.Expect(q.Get("restype")).To(Equal("container")) + // the azure library does not expose the struct for this response + // and copying its definition yields a strange "unsupported type" + // error when marshaling to xml, so we just hardcode a valid response + // here + resp := fmt.Sprintf(` + +1 + + +`, bucketEndpoint) + _, err := w.Write([]byte(resp)) + g.Expect(err).ToNot(HaveOccurred()) + }) + bucketServer := &http.Server{ + Addr: bucketAddr, + Handler: bucketHandler, + } + go bucketServer.Serve(bucketListener) + defer bucketServer.Shutdown(context.Background()) + + tests := []struct { + name string + endpoint string + proxyURL *url.URL + err string + }{ + { + name: "with correct proxy", + endpoint: bucketEndpoint, + proxyURL: &url.URL{Scheme: "http", Host: proxyAddr}, + }, + { + name: "with incorrect proxy", + endpoint: bucketEndpoint, + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", 1)}, + err: "connection refused", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + bucket := &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ + Endpoint: 
tt.endpoint, + }, + } + + client, err := NewClient(t.Context(), + bucket, + WithProxyURL(tt.proxyURL), + withoutCredentials(), + withoutRetries()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(client).ToNot(BeNil()) + + ok, err := client.BucketExists(context.Background(), "podinfo") + if tt.err != "" { + g.Expect(err).To(MatchError(ContainSubstring(tt.err))) + g.Expect(ok).To(BeFalse()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ok).To(BeTrue()) + } + }) + } +} + func TestValidateSecret(t *testing.T) { tests := []struct { name string @@ -383,7 +473,7 @@ func Test_sasTokenFromSecret(t *testing.T) { func Test_chainCredentialWithSecret(t *testing.T) { g := NewWithT(t) - got, err := chainCredentialWithSecret(nil) + got, err := chainCredentialWithSecret(t.Context(), nil) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(BeAssignableToTypeOf(&azidentity.ChainedTokenCredential{})) } diff --git a/pkg/gcp/gcp.go b/internal/bucket/gcp/gcp.go similarity index 57% rename from pkg/gcp/gcp.go rename to internal/bucket/gcp/gcp.go index 77011fada..70afe9fcd 100644 --- a/pkg/gcp/gcp.go +++ b/internal/bucket/gcp/gcp.go @@ -21,15 +21,24 @@ import ( "errors" "fmt" "io" + "net/http" + "net/url" "os" "path/filepath" gcpstorage "cloud.google.com/go/storage" "github.com/go-logr/logr" + "golang.org/x/oauth2/google" "google.golang.org/api/iterator" "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" + + "github.com/fluxcd/pkg/auth" + gcpauth "github.com/fluxcd/pkg/auth/gcp" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) var ( @@ -48,24 +57,110 @@ type GCSClient struct { *gcpstorage.Client } -// NewClient creates a new GCP storage client. The Client will automatically look for the Google Application +// Option is a functional option for configuring the GCS client. 
+type Option func(*options) + +// WithSecret sets the secret to use for authenticating with GCP. +func WithSecret(secret *corev1.Secret) Option { + return func(o *options) { + o.secret = secret + } +} + +// WithProxyURL sets the proxy URL to use for the GCS client. +func WithProxyURL(proxyURL *url.URL) Option { + return func(o *options) { + o.proxyURL = proxyURL + } +} + +// WithAuth sets the auth options for workload identity authentication. +func WithAuth(authOpts ...auth.Option) Option { + return func(o *options) { + o.authOpts = authOpts + } +} + +type options struct { + secret *corev1.Secret + proxyURL *url.URL + authOpts []auth.Option + + // newCustomHTTPClient should create a new HTTP client for interacting with the GCS API. + // This is a test-only option required for mocking the real logic, which requires either + // a valid Google Service Account Key or Controller-Level Workload Identity. Both are not available in tests. + // The real logic is implemented in the newHTTPClient function, which is used when + // constructing the default options object. + newCustomHTTPClient func(context.Context, *options) (*http.Client, error) +} + +func newOptions() *options { + return &options{ + newCustomHTTPClient: newHTTPClient, + } +} + +// NewClient creates a new GCP storage client. The Client will automatically look for the Google Application // Credential environment variable or look for the Google Application Credential file. 
-func NewClient(ctx context.Context, secret *corev1.Secret) (*GCSClient, error) { - c := &GCSClient{} - if secret != nil { - client, err := gcpstorage.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"])) +func NewClient(ctx context.Context, bucket *sourcev1.Bucket, opts ...Option) (*GCSClient, error) { + o := newOptions() + for _, opt := range opts { + opt(o) + } + + var clientOpts []option.ClientOption + + switch { + case o.secret != nil && o.proxyURL == nil: + clientOpts = append(clientOpts, option.WithCredentialsJSON(o.secret.Data["serviceaccount"])) + case o.secret == nil && o.proxyURL == nil: + tokenSource := gcpauth.NewTokenSource(ctx, o.authOpts...) + clientOpts = append(clientOpts, option.WithTokenSource(tokenSource)) + default: // o.proxyURL != nil: + httpClient, err := o.newCustomHTTPClient(ctx, o) if err != nil { return nil, err } - c.Client = client - } else { - client, err := gcpstorage.NewClient(ctx) + clientOpts = append(clientOpts, option.WithHTTPClient(httpClient)) + } + + client, err := gcpstorage.NewClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + + return &GCSClient{client}, nil +} + +// newHTTPClient creates a new HTTP client for interacting with Google Cloud APIs. +func newHTTPClient(ctx context.Context, o *options) (*http.Client, error) { + baseTransport := http.DefaultTransport.(*http.Transport).Clone() + if o.proxyURL != nil { + baseTransport.Proxy = http.ProxyURL(o.proxyURL) + } + + var opts []option.ClientOption + + if o.secret != nil { + // Here we can't use option.WithCredentialsJSON() because htransport.NewTransport() + // won't know what scopes to use and yield a 400 Bad Request error when retrieving + // the OAuth token. Instead we use google.CredentialsFromJSON(), which allows us to + // specify the GCS read-only scope. 
+ creds, err := google.CredentialsFromJSON(ctx, o.secret.Data["serviceaccount"], gcpstorage.ScopeReadOnly) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to create Google credentials from secret: %w", err) } - c.Client = client + opts = append(opts, option.WithCredentials(creds)) + } else { // Workload Identity. + tokenSource := gcpauth.NewTokenSource(ctx, o.authOpts...) + opts = append(opts, option.WithTokenSource(tokenSource)) + } + + transport, err := htransport.NewTransport(ctx, baseTransport, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create Google HTTP transport: %w", err) } - return c, nil + return &http.Client{Transport: transport}, nil } // ValidateSecret validates the credential secret. The provided Secret may @@ -84,14 +179,14 @@ func ValidateSecret(secret *corev1.Secret) error { // exists, or returns a (client) error. func (c *GCSClient) BucketExists(ctx context.Context, bucketName string) (bool, error) { _, err := c.Client.Bucket(bucketName).Attrs(ctx) - if err == gcpstorage.ErrBucketNotExist { + if err == nil { + return true, nil + } + if errors.Is(err, gcpstorage.ErrBucketNotExist) { // Not returning error to be compatible with minio's API. return false, nil } - if err != nil { - return false, err - } - return true, nil + return false, err } // FGetObject gets the object from the provided object storage bucket, and diff --git a/pkg/gcp/gcp_test.go b/internal/bucket/gcp/gcp_test.go similarity index 67% rename from pkg/gcp/gcp_test.go rename to internal/bucket/gcp/gcp_test.go index 53989aafe..0c12a72ea 100644 --- a/pkg/gcp/gcp_test.go +++ b/internal/bucket/gcp/gcp_test.go @@ -26,19 +26,23 @@ import ( "net" "net/http" "net/http/httptest" + "net/url" "os" "path/filepath" "testing" "time" + "cloud.google.com/go/compute/metadata" gcpstorage "cloud.google.com/go/storage" + . 
"github.com/onsi/gomega" "google.golang.org/api/googleapi" + "google.golang.org/api/option" raw "google.golang.org/api/storage/v1" - "gotest.tools/assert" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "google.golang.org/api/option" + sourcev1 "github.com/fluxcd/source-controller/api/v1" + testproxy "github.com/fluxcd/source-controller/tests/proxy" ) const ( @@ -46,10 +50,13 @@ const ( objectName string = "test.yaml" objectGeneration int64 = 3 objectEtag string = "bFbHCDvedeecefdgmfmhfuRxBdcedGe96S82XJOAXxjJpk=" + envGCSHost string = "STORAGE_EMULATOR_HOST" + envADC string = "GOOGLE_APPLICATION_CREDENTIALS" ) var ( hc *http.Client + host string client *gcpstorage.Client close func() err error @@ -75,8 +82,24 @@ var ( } ) +// createTestBucket creates a test bucket for testing purposes +func createTestBucket() *sourcev1.Bucket { + return &sourcev1.Bucket{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-bucket", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + BucketName: bucketName, + Endpoint: "storage.googleapis.com", + Provider: sourcev1.BucketProviderGoogle, + Interval: v1.Duration{Duration: time.Minute * 5}, + }, + } +} + func TestMain(m *testing.M) { - hc, close = newTestServer(func(w http.ResponseWriter, r *http.Request) { + hc, host, close = newTestServer(func(w http.ResponseWriter, r *http.Request) { io.Copy(io.Discard, r.Body) switch r.RequestURI { case fmt.Sprintf("/storage/v1/b/%s?alt=json&prettyPrint=false&projection=full", bucketName): @@ -104,6 +127,7 @@ func TestMain(m *testing.M) { case fmt.Sprintf("/storage/v1/b/%s/o?alt=json&delimiter=&endOffset=&pageToken=&prefix=&prettyPrint=false&projection=full&startOffset=&versions=false", bucketName): case fmt.Sprintf("/storage/v1/b/%s/o?alt=json&delimiter=&endOffset=&includeTrailingDelimiter=false&pageToken=&prefix=&prettyPrint=false&projection=full&startOffset=&versions=false", bucketName): case 
fmt.Sprintf("/storage/v1/b/%s/o?alt=json&delimiter=&endOffset=&includeTrailingDelimiter=false&matchGlob=&pageToken=&prefix=&prettyPrint=false&projection=full&startOffset=&versions=false", bucketName): + case fmt.Sprintf("/storage/v1/b/%s/o?alt=json&delimiter=&endOffset=&includeFoldersAsPrefixes=false&includeTrailingDelimiter=false&matchGlob=&pageToken=&prefix=&prettyPrint=false&projection=full&startOffset=&versions=false", bucketName): w.WriteHeader(200) response := &raw.Objects{} response.Items = append(response.Items, getObject()) @@ -139,10 +163,102 @@ func TestMain(m *testing.M) { } func TestNewClientWithSecretErr(t *testing.T) { - gcpClient, err := NewClient(context.Background(), secret.DeepCopy()) + bucket := createTestBucket() + gcpClient, err := NewClient(context.Background(), bucket, WithSecret(secret.DeepCopy())) t.Log(err) - assert.Error(t, err, "dialing: invalid character 'e' looking for beginning of value") - assert.Assert(t, gcpClient == nil) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal("dialing: invalid character 'e' looking for beginning of value")) + g.Expect(gcpClient).To(BeNil()) +} + +func TestNewClientWithProxyErr(t *testing.T) { + _, envADCIsSet := os.LookupEnv(envADC) + g := NewWithT(t) + g.Expect(envADCIsSet).To(BeFalse()) + g.Expect(metadata.OnGCE()).To(BeFalse()) + + t.Run("with secret", func(t *testing.T) { + g := NewWithT(t) + bucket := createTestBucket() + gcpClient, err := NewClient(context.Background(), bucket, + WithProxyURL(&url.URL{}), + WithSecret(secret.DeepCopy())) + g.Expect(err).To(HaveOccurred()) + g.Expect(gcpClient).To(BeNil()) + g.Expect(err.Error()).To(Equal("failed to create Google credentials from secret: invalid character 'e' looking for beginning of value")) + }) + + t.Run("without secret", func(t *testing.T) { + g := NewWithT(t) + bucket := createTestBucket() + gcpClient, err := NewClient(context.Background(), bucket, + WithProxyURL(&url.URL{})) + 
g.Expect(err).NotTo(HaveOccurred()) + g.Expect(gcpClient).NotTo(BeNil()) + bucketAttrs, err := gcpClient.Client.Bucket("some-bucket").Attrs(context.Background()) + g.Expect(err).To(HaveOccurred()) + g.Expect(bucketAttrs).To(BeNil()) + g.Expect(err.Error()).To(ContainSubstring("failed to create provider access token")) + }) +} + +func TestProxy(t *testing.T) { + proxyAddr, proxyPort := testproxy.New(t) + + err := os.Setenv(envGCSHost, fmt.Sprintf("https://%s", host)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + defer func() { + err := os.Unsetenv(envGCSHost) + g.Expect(err).NotTo(HaveOccurred()) + }() + + tests := []struct { + name string + proxyURL *url.URL + err string + }{ + { + name: "with correct address", + proxyURL: &url.URL{Scheme: "http", Host: proxyAddr}, + }, + { + name: "with incorrect address", + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}, + err: "connection refused", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + opts := []Option{WithProxyURL(tt.proxyURL)} + opts = append(opts, func(o *options) { + o.newCustomHTTPClient = func(ctx context.Context, o *options) (*http.Client, error) { + transport := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + Proxy: http.ProxyURL(o.proxyURL), + } + return &http.Client{Transport: transport}, nil + } + }) + bucket := createTestBucket() + gcpClient, err := NewClient(context.Background(), bucket, opts...) 
+ g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(gcpClient).NotTo(BeNil()) + gcpClient.Client.SetRetry(gcpstorage.WithMaxAttempts(1)) + exists, err := gcpClient.BucketExists(context.Background(), bucketName) + if tt.err != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.err)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeTrue()) + } + }) + } } func TestBucketExists(t *testing.T) { @@ -150,8 +266,9 @@ func TestBucketExists(t *testing.T) { Client: client, } exists, err := gcpClient.BucketExists(context.Background(), bucketName) - assert.NilError(t, err) - assert.Assert(t, exists) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeTrue()) } func TestBucketNotExists(t *testing.T) { @@ -160,8 +277,9 @@ func TestBucketNotExists(t *testing.T) { Client: client, } exists, err := gcpClient.BucketExists(context.Background(), bucket) - assert.NilError(t, err) - assert.Assert(t, !exists) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeFalse()) } func TestVisitObjects(t *testing.T) { @@ -175,12 +293,14 @@ func TestVisitObjects(t *testing.T) { etags = append(etags, etag) return nil }) - assert.NilError(t, err) - assert.DeepEqual(t, keys, []string{objectName}) - assert.DeepEqual(t, etags, []string{objectEtag}) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(keys).To(Equal([]string{objectName})) + g.Expect(etags).To(Equal([]string{objectEtag})) } func TestVisitObjectsErr(t *testing.T) { + g := NewWithT(t) gcpClient := &GCSClient{ Client: client, } @@ -188,7 +308,9 @@ func TestVisitObjectsErr(t *testing.T) { err := gcpClient.VisitObjects(context.Background(), badBucketName, "", func(key, etag string) error { return nil }) - assert.Error(t, err, fmt.Sprintf("listing objects from bucket '%s' failed: storage: bucket doesn't exist", badBucketName)) + g.Expect(err).To(HaveOccurred()) + 
g.Expect(err.Error()).To(ContainSubstring( + fmt.Sprintf("listing objects from bucket '%s' failed: storage: bucket doesn't exist", badBucketName))) } func TestVisitObjectsCallbackErr(t *testing.T) { @@ -199,10 +321,13 @@ func TestVisitObjectsCallbackErr(t *testing.T) { err := gcpClient.VisitObjects(context.Background(), bucketName, "", func(key, etag string) error { return mockErr }) - assert.Error(t, err, mockErr.Error()) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(mockErr.Error())) } func TestFGetObject(t *testing.T) { + g := NewWithT(t) tempDir := t.TempDir() gcpClient := &GCSClient{ Client: client, @@ -210,33 +335,34 @@ func TestFGetObject(t *testing.T) { localPath := filepath.Join(tempDir, objectName) etag, err := gcpClient.FGetObject(context.Background(), bucketName, objectName, localPath) if err != io.EOF { - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) } - assert.Equal(t, etag, objectEtag) + g.Expect(etag).To(Equal(objectEtag)) } func TestFGetObjectNotExists(t *testing.T) { + g := NewWithT(t) object := "notexists.txt" tempDir := t.TempDir() gcsClient := &GCSClient{ Client: client, } localPath := filepath.Join(tempDir, object) - _, err = gcsClient.FGetObject(context.Background(), bucketName, object, localPath) - if err != io.EOF { - assert.Error(t, err, "storage: object doesn't exist") - assert.Check(t, gcsClient.ObjectIsNotFound(err)) - } + _, err := gcsClient.FGetObject(context.Background(), bucketName, object, localPath) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("storage: object doesn't exist")) } func TestFGetObjectDirectoryIsFileName(t *testing.T) { + g := NewWithT(t) tempDir := t.TempDir() gcpClient := &GCSClient{ Client: client, } _, err = gcpClient.FGetObject(context.Background(), bucketName, objectName, tempDir) if err != io.EOF { - assert.Error(t, err, "filename is a directory") + g.Expect(err).To(HaveOccurred()) + 
g.Expect(err.Error()).To(Equal("filename is a directory")) } } @@ -262,25 +388,28 @@ func TestValidateSecret(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() err := ValidateSecret(tt.secret) + g := NewWithT(t) if tt.error { - assert.Error(t, err, fmt.Sprintf("invalid '%v' secret data: required fields 'serviceaccount'", tt.secret.Name)) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(fmt.Sprintf("invalid '%v' secret data: required fields 'serviceaccount'", tt.secret.Name))) } else { - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) } }) } } -func newTestServer(handler func(w http.ResponseWriter, r *http.Request)) (*http.Client, func()) { +func newTestServer(handler func(w http.ResponseWriter, r *http.Request)) (*http.Client, string, func()) { ts := httptest.NewTLSServer(http.HandlerFunc(handler)) + host := ts.Listener.Addr().String() tlsConf := &tls.Config{InsecureSkipVerify: true} tr := &http.Transport{ TLSClientConfig: tlsConf, DialTLS: func(netw, addr string) (net.Conn, error) { - return tls.Dial("tcp", ts.Listener.Addr().String(), tlsConf) + return tls.Dial("tcp", host, tlsConf) }, } - return &http.Client{Transport: tr}, func() { + return &http.Client{Transport: tr}, host, func() { tr.CloseIdleConnections() ts.Close() } diff --git a/internal/bucket/minio/minio.go b/internal/bucket/minio/minio.go new file mode 100644 index 000000000..026200a83 --- /dev/null +++ b/internal/bucket/minio/minio.go @@ -0,0 +1,373 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package minio + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/s3utils" + corev1 "k8s.io/api/core/v1" + + "github.com/fluxcd/pkg/auth" + awsauth "github.com/fluxcd/pkg/auth/aws" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" +) + +// MinioClient is a minimal Minio client for fetching files from S3 compatible +// storage APIs. +type MinioClient struct { + *minio.Client +} + +// options holds the configuration for the Minio client. +type options struct { + secret *corev1.Secret + stsSecret *corev1.Secret + tlsConfig *tls.Config + stsTLSConfig *tls.Config + proxyURL *url.URL + authOpts []auth.Option +} + +// Option is a function that configures the Minio client. +type Option func(*options) + +// WithSecret sets the secret for the Minio client. +func WithSecret(secret *corev1.Secret) Option { + return func(o *options) { + o.secret = secret + } +} + +// WithTLSConfig sets the TLS configuration for the Minio client. +func WithTLSConfig(tlsConfig *tls.Config) Option { + return func(o *options) { + o.tlsConfig = tlsConfig + } +} + +// WithProxyURL sets the proxy URL for the Minio client. +func WithProxyURL(proxyURL *url.URL) Option { + return func(o *options) { + o.proxyURL = proxyURL + } +} + +// WithSTSSecret sets the STS secret for the Minio client. +func WithSTSSecret(secret *corev1.Secret) Option { + return func(o *options) { + o.stsSecret = secret + } +} + +// WithSTSTLSConfig sets the STS TLS configuration for the Minio client. 
+func WithSTSTLSConfig(tlsConfig *tls.Config) Option { + return func(o *options) { + o.stsTLSConfig = tlsConfig + } +} + +// WithAuth sets the auth options for workload identity authentication. +func WithAuth(authOpts ...auth.Option) Option { + return func(o *options) { + o.authOpts = authOpts + } +} + +// NewClient creates a new Minio storage client. +func NewClient(ctx context.Context, bucket *sourcev1.Bucket, opts ...Option) (*MinioClient, error) { + var o options + for _, opt := range opts { + opt(&o) + } + + minioOpts := minio.Options{ + Region: bucket.Spec.Region, + Secure: !bucket.Spec.Insecure, + // About BucketLookup, it should be noted that not all S3 providers support + // path-type access (e.g., Ali OSS). Hence, we revert to using the default + // auto access, which we believe can cover most use cases. + } + + switch bucketProvider := bucket.Spec.Provider; { + case o.secret != nil: + minioOpts.Creds = newCredsFromSecret(o.secret) + case bucketProvider == sourcev1.BucketProviderAmazon: + creds, err := newAWSCreds(ctx, &o) + if err != nil { + return nil, err + } + minioOpts.Creds = creds + case bucketProvider == sourcev1.BucketProviderGeneric: + minioOpts.Creds = newGenericCreds(bucket, &o) + } + + var transportOpts []func(*http.Transport) + + if minioOpts.Secure && o.tlsConfig != nil { + transportOpts = append(transportOpts, func(t *http.Transport) { + t.TLSClientConfig = o.tlsConfig.Clone() + }) + } + + if o.proxyURL != nil { + transportOpts = append(transportOpts, func(t *http.Transport) { + t.Proxy = http.ProxyURL(o.proxyURL) + }) + } + + if len(transportOpts) > 0 { + transport, err := minio.DefaultTransport(minioOpts.Secure) + if err != nil { + return nil, fmt.Errorf("failed to create default minio transport: %w", err) + } + for _, opt := range transportOpts { + opt(transport) + } + minioOpts.Transport = transport + } + + client, err := minio.New(bucket.Spec.Endpoint, &minioOpts) + if err != nil { + return nil, err + } + return &MinioClient{Client: 
client}, nil +} + +// newCredsFromSecret creates a new Minio credentials object from the provided +// secret. +func newCredsFromSecret(secret *corev1.Secret) *credentials.Credentials { + var accessKey, secretKey string + if k, ok := secret.Data["accesskey"]; ok { + accessKey = string(k) + } + if k, ok := secret.Data["secretkey"]; ok { + secretKey = string(k) + } + if accessKey != "" && secretKey != "" { + return credentials.NewStaticV4(accessKey, secretKey, "") + } + return nil +} + +// newAWSCreds creates a new Minio credentials object for `aws` bucket provider. +// +// This function is only called when Secret authentication is not available. +// +// Uses AWS SDK's config.LoadDefaultConfig() which supports: +// - Workload Identity (IRSA/EKS Pod Identity) +// - EC2 instance profiles +// - Environment variables +// - Shared credentials files +// - All other AWS SDK authentication methods +func newAWSCreds(ctx context.Context, o *options) (*credentials.Credentials, error) { + var opts auth.Options + opts.Apply(o.authOpts...) + + awsCredsProvider := awsauth.NewCredentialsProvider(ctx, o.authOpts...) + awsCreds, err := awsCredsProvider.Retrieve(ctx) + if err != nil { + return nil, fmt.Errorf("AWS authentication failed: %w", err) + } + + return credentials.NewStaticV4( + awsCreds.AccessKeyID, + awsCreds.SecretAccessKey, + awsCreds.SessionToken, + ), nil +} + +// newGenericCreds creates a new Minio credentials object for the `generic` bucket provider. 
+func newGenericCreds(bucket *sourcev1.Bucket, o *options) *credentials.Credentials { + + sts := bucket.Spec.STS + if sts == nil { + return nil + } + + switch sts.Provider { + case sourcev1.STSProviderLDAP: + client := &http.Client{Transport: http.DefaultTransport} + if o.proxyURL != nil || o.stsTLSConfig != nil { + transport := http.DefaultTransport.(*http.Transport).Clone() + if o.proxyURL != nil { + transport.Proxy = http.ProxyURL(o.proxyURL) + } + if o.stsTLSConfig != nil { + transport.TLSClientConfig = o.stsTLSConfig.Clone() + } + client = &http.Client{Transport: transport} + } + var username, password string + if o.stsSecret != nil { + username = string(o.stsSecret.Data["username"]) + password = string(o.stsSecret.Data["password"]) + } + return credentials.New(&credentials.LDAPIdentity{ + Client: client, + STSEndpoint: sts.Endpoint, + LDAPUsername: username, + LDAPPassword: password, + }) + } + + return nil +} + +// ValidateSecret validates the credential secret. The provided Secret may +// be nil. +func ValidateSecret(secret *corev1.Secret) error { + if secret == nil { + return nil + } + err := fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) + if _, ok := secret.Data["accesskey"]; !ok { + return err + } + if _, ok := secret.Data["secretkey"]; !ok { + return err + } + return nil +} + +// ValidateSTSProvider validates the STS provider. 
+func ValidateSTSProvider(bucketProvider string, sts *sourcev1.BucketSTSSpec) error { + errProviderIncompatbility := fmt.Errorf("STS provider '%s' is not supported for '%s' bucket provider", + sts.Provider, bucketProvider) + errSecretNotRequired := fmt.Errorf("spec.sts.secretRef is not required for the '%s' STS provider", + sts.Provider) + errCertSecretNotRequired := fmt.Errorf("spec.sts.certSecretRef is not required for the '%s' STS provider", + sts.Provider) + + switch bucketProvider { + case sourcev1.BucketProviderAmazon: + switch sts.Provider { + case sourcev1.STSProviderAmazon: + if sts.SecretRef != nil { + return errSecretNotRequired + } + if sts.CertSecretRef != nil { + return errCertSecretNotRequired + } + return nil + default: + return errProviderIncompatbility + } + case sourcev1.BucketProviderGeneric: + switch sts.Provider { + case sourcev1.STSProviderLDAP: + return nil + default: + return errProviderIncompatbility + } + } + + return fmt.Errorf("STS configuration is not supported for '%s' bucket provider", bucketProvider) +} + +// ValidateSTSSecret validates the STS secret. The provided Secret may be nil. +func ValidateSTSSecret(stsProvider string, secret *corev1.Secret) error { + switch stsProvider { + case sourcev1.STSProviderLDAP: + return validateSTSSecretForProvider(stsProvider, secret, "username", "password") + default: + return nil + } +} + +// validateSTSSecretForProvider validates the STS secret for each provider. +// The provided Secret may be nil. 
+func validateSTSSecretForProvider(stsProvider string, secret *corev1.Secret, keys ...string) error { + if secret == nil { + return nil + } + err := fmt.Errorf("invalid '%s' secret data for '%s' STS provider: required fields %s", + secret.Name, stsProvider, strings.Join(keys, ", ")) + if len(secret.Data) == 0 { + return err + } + for _, key := range keys { + value, ok := secret.Data[key] + if !ok || len(value) == 0 { + return err + } + } + return nil +} + +// FGetObject gets the object from the provided object storage bucket, and +// writes it to targetPath. +// It returns the etag of the successfully fetched file, or any error. +func (c *MinioClient) FGetObject(ctx context.Context, bucketName, objectName, localPath string) (string, error) { + stat, err := c.Client.StatObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + return "", err + } + opts := minio.GetObjectOptions{} + if err = opts.SetMatchETag(stat.ETag); err != nil { + return "", err + } + if err = c.Client.FGetObject(ctx, bucketName, objectName, localPath, opts); err != nil { + return "", err + } + return stat.ETag, nil +} + +// VisitObjects iterates over the items in the provided object storage +// bucket, calling visit for every item. +// If the underlying client or the visit callback returns an error, +// it returns early. +func (c *MinioClient) VisitObjects(ctx context.Context, bucketName string, prefix string, visit func(key, etag string) error) error { + for object := range c.Client.ListObjects(ctx, bucketName, minio.ListObjectsOptions{ + Recursive: true, + Prefix: prefix, + UseV1: s3utils.IsGoogleEndpoint(*c.Client.EndpointURL()), + }) { + if object.Err != nil { + err := fmt.Errorf("listing objects from bucket '%s' failed: %w", bucketName, object.Err) + return err + } + + if err := visit(object.Key, object.ETag); err != nil { + return err + } + } + return nil +} + +// ObjectIsNotFound checks if the error provided is a minio.ErrResponse +// with "NoSuchKey" code. 
+func (c *MinioClient) ObjectIsNotFound(err error) bool { + if resp := new(minio.ErrorResponse); errors.As(err, resp) { + return resp.Code == "NoSuchKey" + } + return false +} + +// Close closes the Minio Client and logs any useful errors. +func (c *MinioClient) Close(_ context.Context) { + // Minio client does not provide a close method +} diff --git a/internal/bucket/minio/minio_test.go b/internal/bucket/minio/minio_test.go new file mode 100644 index 000000000..d6ba7baa4 --- /dev/null +++ b/internal/bucket/minio/minio_test.go @@ -0,0 +1,825 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package minio + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/xml" + "errors" + "fmt" + "log" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/google/uuid" + miniov7 "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + . 
"github.com/onsi/gomega" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/sourceignore" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" + testlistener "github.com/fluxcd/source-controller/tests/listener" + testproxy "github.com/fluxcd/source-controller/tests/proxy" +) + +const ( + objectName string = "test.yaml" + objectEtag string = "b07bba5a280b58791bc78fb9fc414b09" +) + +var ( + // testMinioVersion is the version (image tag) of the Minio server image + // used to test against. + testMinioVersion = "RELEASE.2024-05-07T06-41-25Z" + // testMinioRootUser is the root user of the Minio server. + testMinioRootUser = "fluxcd" + // testMinioRootPassword is the root password of the Minio server. + testMinioRootPassword = "passw0rd!" + // testVaultAddress is the address of the Minio server, it is set + // by TestMain after booting it. + testMinioAddress string + // testMinioClient is the Minio client used to test against, it is set + // by TestMain after booting the Minio server. + testMinioClient *MinioClient + // testTLSConfig is the TLS configuration used to connect to the Minio server. + testTLSConfig *tls.Config + // testServerCert is the path to the server certificate used to start the Minio + // and STS servers. + testServerCert string + // testServerKey is the path to the server key used to start the Minio and STS servers. + testServerKey string + // ctx is the common context used in tests. 
+ ctx context.Context +) + +var ( + bucketName = "test-bucket-minio" + uuid.New().String() + prefix = "" + secret = corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "minio-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte(testMinioRootUser), + "secretkey": []byte(testMinioRootPassword), + }, + Type: "Opaque", + } + emptySecret = corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "minio-secret", + Namespace: "default", + }, + Data: map[string][]byte{}, + Type: "Opaque", + } + bucket = sourcev1.Bucket{ + ObjectMeta: v1.ObjectMeta{ + Name: "minio-test-bucket", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + BucketName: bucketName, + Provider: "generic", + SecretRef: &meta.LocalObjectReference{ + Name: secret.Name, + }, + }, + } + bucketAwsProvider = sourcev1.Bucket{ + ObjectMeta: v1.ObjectMeta{ + Name: "minio-test-bucket", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + BucketName: bucketName, + Provider: "aws", + }, + } +) + +func TestMain(m *testing.M) { + // Initialize common test context + ctx = context.Background() + + // Uses a sensible default on Windows (TCP/HTTP) and Linux/MacOS (socket) + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("could not connect to docker: %s", err) + } + + // Load a private key and certificate from a self-signed CA for the Minio server and + // a client TLS configuration to connect to the Minio server. 
+ testServerCert, testServerKey, testTLSConfig, err = loadServerCertAndClientTLSConfig() + if err != nil { + log.Fatalf("could not load server cert and client TLS config: %s", err) + } + + // Pull the image, create a container based on it, and run it + resource, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "minio/minio", + Tag: testMinioVersion, + ExposedPorts: []string{ + "9000/tcp", + "9001/tcp", + }, + Env: []string{ + "MINIO_ROOT_USER=" + testMinioRootUser, + "MINIO_ROOT_PASSWORD=" + testMinioRootPassword, + }, + Cmd: []string{"server", "/data", "--console-address", ":9001"}, + Mounts: []string{ + fmt.Sprintf("%s:/root/.minio/certs/public.crt", testServerCert), + fmt.Sprintf("%s:/root/.minio/certs/private.key", testServerKey), + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + }) + if err != nil { + log.Fatalf("could not start resource: %s", err) + } + + purgeResource := func() { + if err := pool.Purge(resource); err != nil { + log.Printf("could not purge resource: %s", err) + } + } + + // Set the address of the Minio server used for testing. + testMinioAddress = fmt.Sprintf("127.0.0.1:%v", resource.GetPort("9000/tcp")) + + // Construct a Minio client using the address of the Minio server. + testMinioClient, err = NewClient(ctx, bucketStub(bucket, testMinioAddress), + WithSecret(secret.DeepCopy()), + WithTLSConfig(testTLSConfig)) + if err != nil { + log.Fatalf("cannot create Minio client: %s", err) + } + + // Wait until Minio is ready to serve requests... 
+ if err := pool.Retry(func() error { + hCancel, err := testMinioClient.HealthCheck(1 * time.Second) + if err != nil { + log.Fatalf("cannot start Minio health check: %s", err) + } + defer hCancel() + + if !testMinioClient.IsOnline() { + return fmt.Errorf("client is offline: Minio is not ready") + } + return nil + }); err != nil { + purgeResource() + log.Fatalf("could not connect to docker: %s", err) + } + + createBucket(ctx) + addObjectToBucket(ctx) + run := m.Run() + removeObjectFromBucket(ctx) + deleteBucket(ctx) + purgeResource() + os.Exit(run) +} + +func TestNewClient(t *testing.T) { + minioClient, err := NewClient(ctx, bucketStub(bucket, testMinioAddress), + WithSecret(secret.DeepCopy()), + WithTLSConfig(testTLSConfig)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) +} + +func TestNewClientEmptySecret(t *testing.T) { + minioClient, err := NewClient(ctx, bucketStub(bucket, testMinioAddress), + WithSecret(emptySecret.DeepCopy()), + WithTLSConfig(testTLSConfig)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) +} + +func TestNewClientAWSProvider(t *testing.T) { + t.Run("with secret", func(t *testing.T) { + validSecret := corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "valid-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte(testMinioRootUser), + "secretkey": []byte(testMinioRootPassword), + }, + Type: "Opaque", + } + + bucket := bucketStub(bucketAwsProvider, testMinioAddress) + minioClient, err := NewClient(ctx, bucket, WithSecret(&validSecret)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) + }) + + t.Run("without secret", func(t *testing.T) { + bucket := bucketStub(bucketAwsProvider, testMinioAddress) + minioClient, err := NewClient(ctx, bucket) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("AWS authentication failed")) + 
g.Expect(minioClient).To(BeNil()) + }) +} + +func TestBucketExists(t *testing.T) { + exists, err := testMinioClient.BucketExists(ctx, bucketName) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeTrue()) +} + +func TestBucketNotExists(t *testing.T) { + exists, err := testMinioClient.BucketExists(ctx, "notexistsbucket") + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeFalse()) +} + +func TestFGetObject(t *testing.T) { + tempDir := t.TempDir() + path := filepath.Join(tempDir, sourceignore.IgnoreFile) + _, err := testMinioClient.FGetObject(ctx, bucketName, objectName, path) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) +} + +func TestNewClientAndFGetObjectWithSTSEndpoint(t *testing.T) { + var credsRetrieved bool + + // start a mock LDAP STS server + ldapSTSListener, ldapSTSAddr, _ := testlistener.New(t) + ldapSTSEndpoint := fmt.Sprintf("https://%s", ldapSTSAddr) + ldapSTSHandler := http.NewServeMux() + var ldapUsername, ldapPassword string + ldapSTSHandler.HandleFunc("POST /", + func(w http.ResponseWriter, r *http.Request) { + g := NewWithT(t) + err := r.ParseForm() + g.Expect(err).NotTo(HaveOccurred()) + username := r.Form.Get("LDAPUsername") + password := r.Form.Get("LDAPPassword") + g.Expect(username).To(Equal(ldapUsername)) + g.Expect(password).To(Equal(ldapPassword)) + var result credentials.LDAPIdentityResult + result.Credentials.AccessKey = testMinioRootUser + result.Credentials.SecretKey = testMinioRootPassword + err = xml.NewEncoder(w).Encode(credentials.AssumeRoleWithLDAPResponse{Result: result}) + g.Expect(err).NotTo(HaveOccurred()) + credsRetrieved = true + }) + ldapSTSServer := &http.Server{ + Addr: ldapSTSAddr, + Handler: ldapSTSHandler, + } + go ldapSTSServer.ServeTLS(ldapSTSListener, testServerCert, testServerKey) + defer ldapSTSServer.Shutdown(ctx) + + // start proxy + proxyAddr, proxyPort := testproxy.New(t) + + tests := []struct { + name string + provider string + stsSpec 
*sourcev1.BucketSTSSpec + opts []Option + ldapUsername string + ldapPassword string + err string + }{ + { + name: "with correct ldap endpoint", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + opts: []Option{WithSTSTLSConfig(testTLSConfig)}, + }, + { + name: "with incorrect ldap endpoint", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: fmt.Sprintf("http://localhost:%d", 1), + }, + err: "connection refused", + }, + { + name: "with correct ldap endpoint and secret", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + opts: []Option{ + WithSTSTLSConfig(testTLSConfig), + WithSTSSecret(&corev1.Secret{ + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("password"), + }, + }), + }, + ldapUsername: "user", + ldapPassword: "password", + }, + { + name: "with correct ldap endpoint and proxy", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + opts: []Option{ + WithProxyURL(&url.URL{Scheme: "http", Host: proxyAddr}), + WithSTSTLSConfig(testTLSConfig), + }, + }, + { + name: "with correct ldap endpoint and incorrect proxy", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + opts: []Option{ + WithProxyURL(&url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}), + }, + err: "connection refused", + }, + { + name: "with correct ldap endpoint and without client tls config", + provider: "generic", + stsSpec: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: ldapSTSEndpoint, + }, + err: "tls: failed to verify certificate", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + credsRetrieved = false + ldapUsername = tt.ldapUsername + ldapPassword = tt.ldapPassword + + bucket := bucketStub(bucket, testMinioAddress) 
+ bucket.Spec.Provider = tt.provider + bucket.Spec.STS = tt.stsSpec + + opts := tt.opts + opts = append(opts, WithTLSConfig(testTLSConfig)) + + minioClient, err := NewClient(ctx, bucket, opts...) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) + + path := filepath.Join(t.TempDir(), sourceignore.IgnoreFile) + _, err = minioClient.FGetObject(ctx, bucketName, objectName, path) + if tt.err != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.err)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(credsRetrieved).To(BeTrue()) + } + }) + } +} + +func TestNewClientAndFGetObjectWithProxy(t *testing.T) { + proxyAddr, proxyPort := testproxy.New(t) + + tests := []struct { + name string + proxyURL *url.URL + errSubstring string + }{ + { + name: "with correct proxy", + proxyURL: &url.URL{Scheme: "http", Host: proxyAddr}, + }, + { + name: "with incorrect proxy", + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}, + errSubstring: "connection refused", + }, + } + + // run test + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + minioClient, err := NewClient(ctx, bucketStub(bucket, testMinioAddress), + WithSecret(secret.DeepCopy()), + WithTLSConfig(testTLSConfig), + WithProxyURL(tt.proxyURL)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) + tempDir := t.TempDir() + path := filepath.Join(tempDir, sourceignore.IgnoreFile) + _, err = minioClient.FGetObject(ctx, bucketName, objectName, path) + if tt.errSubstring != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.errSubstring)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} + +func TestFGetObjectNotExists(t *testing.T) { + tempDir := t.TempDir() + badKey := "invalid.txt" + path := filepath.Join(tempDir, badKey) + _, err := testMinioClient.FGetObject(ctx, bucketName, badKey, path) + g := 
NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal("The specified key does not exist.")) + g.Expect(testMinioClient.ObjectIsNotFound(err)).To(BeTrue()) +} + +func TestVisitObjects(t *testing.T) { + keys := []string{} + etags := []string{} + err := testMinioClient.VisitObjects(context.TODO(), bucketName, prefix, func(key, etag string) error { + keys = append(keys, key) + etags = append(etags, etag) + return nil + }) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(keys).To(Equal([]string{objectName})) + g.Expect(etags).To(Equal([]string{objectEtag})) +} + +func TestVisitObjectsErr(t *testing.T) { + badBucketName := "bad-bucket" + err := testMinioClient.VisitObjects(ctx, badBucketName, prefix, func(string, string) error { + return nil + }) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(fmt.Sprintf("listing objects from bucket '%s' failed: The specified bucket does not exist", badBucketName))) +} + +func TestVisitObjectsCallbackErr(t *testing.T) { + mockErr := fmt.Errorf("mock") + err := testMinioClient.VisitObjects(context.TODO(), bucketName, prefix, func(key, etag string) error { + return mockErr + }) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(mockErr.Error())) +} + +func TestValidateSecret(t *testing.T) { + t.Parallel() + testCases := []struct { + name string + secret *corev1.Secret + error bool + }{ + { + name: "valid secret", + secret: secret.DeepCopy(), + }, + { + name: "nil secret", + secret: nil, + }, + { + name: "invalid secret", + secret: emptySecret.DeepCopy(), + error: true, + }, + } + for _, testCase := range testCases { + tt := testCase + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + err := ValidateSecret(tt.secret) + if tt.error { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(fmt.Sprintf("invalid '%v' secret data: required fields 'accesskey' and 'secretkey'", tt.secret.Name))) 
+ } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} + +func TestValidateSTSProvider(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + bucketProvider string + stsProvider string + withSecret bool + withCertSecret bool + err string + }{ + { + name: "aws", + bucketProvider: "aws", + stsProvider: "aws", + }, + { + name: "aws does not require a secret", + bucketProvider: "aws", + stsProvider: "aws", + withSecret: true, + err: "spec.sts.secretRef is not required for the 'aws' STS provider", + }, + { + name: "aws does not require a cert secret", + bucketProvider: "aws", + stsProvider: "aws", + withCertSecret: true, + err: "spec.sts.certSecretRef is not required for the 'aws' STS provider", + }, + { + name: "ldap", + bucketProvider: "generic", + stsProvider: "ldap", + }, + { + name: "ldap may use a secret", + bucketProvider: "generic", + stsProvider: "ldap", + withSecret: true, + }, + { + name: "ldap may use a cert secret", + bucketProvider: "generic", + stsProvider: "ldap", + withCertSecret: true, + }, + { + name: "ldap sts provider unsupported for aws bucket provider", + bucketProvider: "aws", + stsProvider: "ldap", + err: "STS provider 'ldap' is not supported for 'aws' bucket provider", + }, + { + name: "aws sts provider unsupported for generic bucket provider", + bucketProvider: "generic", + stsProvider: "aws", + err: "STS provider 'aws' is not supported for 'generic' bucket provider", + }, + { + name: "unsupported bucket provider", + bucketProvider: "gcp", + stsProvider: "ldap", + err: "STS configuration is not supported for 'gcp' bucket provider", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + sts := &sourcev1.BucketSTSSpec{ + Provider: tt.stsProvider, + } + if tt.withSecret { + sts.SecretRef = &meta.LocalObjectReference{} + } + if tt.withCertSecret { + sts.CertSecretRef = &meta.LocalObjectReference{} + } + g := NewWithT(t) + err := ValidateSTSProvider(tt.bucketProvider, sts) + if 
tt.err != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.err)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} + +func TestValidateSTSSecret(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + provider string + secret *corev1.Secret + err string + }{ + { + name: "ldap provider does not require a secret", + provider: "ldap", + }, + { + name: "valid ldap secret", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + }, + { + name: "empty ldap secret", + provider: "ldap", + secret: &corev1.Secret{ObjectMeta: v1.ObjectMeta{Name: "ldap-secret"}}, + err: "invalid 'ldap-secret' secret data for 'ldap' STS provider: required fields username, password", + }, + { + name: "ldap secret missing password", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "username": []byte("user"), + }, + }, + err: "invalid '' secret data for 'ldap' STS provider: required fields username, password", + }, + { + name: "ldap secret missing username", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "password": []byte("pass"), + }, + }, + err: "invalid '' secret data for 'ldap' STS provider: required fields username, password", + }, + { + name: "ldap secret with empty username", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "username": []byte(""), + "password": []byte("pass"), + }, + }, + err: "invalid '' secret data for 'ldap' STS provider: required fields username, password", + }, + { + name: "ldap secret with empty password", + provider: "ldap", + secret: &corev1.Secret{ + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte(""), + }, + }, + err: "invalid '' secret data for 'ldap' STS provider: required fields username, password", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + g := NewWithT(t) + 
err := ValidateSTSSecret(tt.provider, tt.secret) + if tt.err != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.err)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} + +func bucketStub(bucket sourcev1.Bucket, endpoint string) *sourcev1.Bucket { + b := bucket.DeepCopy() + b.Spec.Endpoint = endpoint + b.Spec.Insecure = false + return b +} + +func createBucket(ctx context.Context) { + if err := testMinioClient.Client.MakeBucket(ctx, bucketName, miniov7.MakeBucketOptions{}); err != nil { + exists, errBucketExists := testMinioClient.BucketExists(ctx, bucketName) + if errBucketExists == nil && exists { + deleteBucket(ctx) + } else { + log.Fatalf("could not create bucket: %s", err) + } + } +} + +func deleteBucket(ctx context.Context) { + if err := testMinioClient.Client.RemoveBucket(ctx, bucketName); err != nil { + log.Println(err) + } +} + +func addObjectToBucket(ctx context.Context) { + fileReader := strings.NewReader(getObjectFile()) + fileSize := fileReader.Size() + _, err := testMinioClient.Client.PutObject(ctx, bucketName, objectName, fileReader, fileSize, miniov7.PutObjectOptions{ + ContentType: "text/x-yaml", + }) + if err != nil { + log.Println(err) + } +} + +func removeObjectFromBucket(ctx context.Context) { + if err := testMinioClient.Client.RemoveObject(ctx, bucketName, objectName, miniov7.RemoveObjectOptions{ + GovernanceBypass: true, + }); err != nil { + log.Println(err) + } +} + +func getObjectFile() string { + return ` + apiVersion: source.toolkit.fluxcd.io/v1 + kind: Bucket + metadata: + name: podinfo + namespace: default + spec: + interval: 5m + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + timeout: 30s + ` +} + +func loadServerCertAndClientTLSConfig() (serverCert string, serverKey string, clientConf *tls.Config, err error) { + const certsDir = "../../controller/testdata/certs" + clientConf = &tls.Config{} + + serverCert, err = filepath.Abs(filepath.Join(certsDir, 
"server.pem")) + if err != nil { + return "", "", nil, fmt.Errorf("failed to get server cert path: %w", err) + } + serverKey, err = filepath.Abs(filepath.Join(certsDir, "server-key.pem")) + if err != nil { + return "", "", nil, fmt.Errorf("failed to get server key path: %w", err) + } + + b, err := os.ReadFile(filepath.Join(certsDir, "ca.pem")) + if err != nil { + return "", "", nil, fmt.Errorf("failed to load CA: %w", err) + } + caPool := x509.NewCertPool() + if !caPool.AppendCertsFromPEM(b) { + return "", "", nil, errors.New("failed to append CA to pool") + } + clientConf.RootCAs = caPool + + clientCert := filepath.Join(certsDir, "client.pem") + clientKey := filepath.Join(certsDir, "client-key.pem") + client, err := tls.LoadX509KeyPair(clientCert, clientKey) + if err != nil { + return "", "", nil, fmt.Errorf("failed to load client cert and key: %w", err) + } + clientConf.Certificates = []tls.Certificate{client} + + return +} diff --git a/internal/cache/cache_test.go b/internal/cache/cache_test.go index 70d87c8ab..e6d3d6ac6 100644 --- a/internal/cache/cache_test.go +++ b/internal/cache/cache_test.go @@ -85,3 +85,55 @@ func TestCache(t *testing.T) { g.Expect(found).To(BeFalse()) g.Expect(item).To(BeNil()) } + +func TestCacheExpiration(t *testing.T) { + g := NewWithT(t) + cache := New(10, 0) + + key := "testKey" + value := "testValue" + expiration := 1 * time.Second + + err := cache.Add(key, value, expiration) + g.Expect(err).ToNot(HaveOccurred()) + + newExpiration := 2 * time.Second + cache.SetExpiration(key, newExpiration) + actualExpiration := cache.GetExpiration(key) + + g.Expect(actualExpiration).Should(BeNumerically("~", newExpiration, 100*time.Millisecond)) + + g.Expect(cache.HasExpired(key)).To(BeFalse()) + + time.Sleep(newExpiration + 100*time.Millisecond) + + g.Expect(cache.HasExpired(key)).To(BeTrue()) + + g.Expect(cache.GetExpiration(key)).To(BeZero()) + + nonExistentKey := "nonExistent" + cache.SetExpiration(nonExistentKey, 1*time.Second) + 
g.Expect(cache.GetExpiration(nonExistentKey)).To(BeZero()) + + g.Expect(cache.HasExpired(nonExistentKey)).To(BeTrue()) +} + +func TestCacheDeleteClear(t *testing.T) { + g := NewWithT(t) + cache := New(3, 0) + + err := cache.Add("key1", "value1", 0) + g.Expect(err).ToNot(HaveOccurred()) + err = cache.Add("key2", "value2", 0) + g.Expect(err).ToNot(HaveOccurred()) + err = cache.Add("key3", "value3", 0) + g.Expect(err).ToNot(HaveOccurred()) + + cache.Delete("key2") + _, found := cache.Get("key2") + g.Expect(found).To(BeFalse()) + g.Expect(cache.ItemCount()).To(Equal(2)) + + cache.Clear() + g.Expect(cache.ItemCount()).To(Equal(0)) +} diff --git a/internal/controller/artifact.go b/internal/controller/artifact.go index 0de6b3706..bebc8d5ae 100644 --- a/internal/controller/artifact.go +++ b/internal/controller/artifact.go @@ -16,9 +16,11 @@ limitations under the License. package controller -import sourcev1 "github.com/fluxcd/source-controller/api/v1" +import ( + "github.com/fluxcd/pkg/apis/meta" +) -type artifactSet []*sourcev1.Artifact +type artifactSet []*meta.Artifact // Diff returns true if any of the revisions in the artifactSet does not match any of the given artifacts. func (s artifactSet) Diff(set artifactSet) bool { diff --git a/internal/controller/artifact_matchers_test.go b/internal/controller/artifact_matchers_test.go index 39f0c9dd7..af716e086 100644 --- a/internal/controller/artifact_matchers_test.go +++ b/internal/controller/artifact_matchers_test.go @@ -19,24 +19,25 @@ package controller import ( "fmt" - sourcev1 "github.com/fluxcd/source-controller/api/v1" . "github.com/onsi/gomega" "github.com/onsi/gomega/types" + + "github.com/fluxcd/pkg/apis/meta" ) // MatchArtifact returns a custom matcher to check equality of a v1beta1.Artifact, the timestamp and URL are ignored. 
-func MatchArtifact(expected *sourcev1.Artifact) types.GomegaMatcher { +func MatchArtifact(expected *meta.Artifact) types.GomegaMatcher { return &matchArtifact{ expected: expected, } } type matchArtifact struct { - expected *sourcev1.Artifact + expected *meta.Artifact } func (m matchArtifact) Match(actual interface{}) (success bool, err error) { - actualArtifact, ok := actual.(*sourcev1.Artifact) + actualArtifact, ok := actual.(*meta.Artifact) if !ok { return false, fmt.Errorf("actual should be a pointer to an Artifact") } diff --git a/internal/controller/bucket_controller.go b/internal/controller/bucket_controller.go index c5c3267d2..7fe881be6 100644 --- a/internal/controller/bucket_controller.go +++ b/internal/controller/bucket_controller.go @@ -18,8 +18,10 @@ package controller import ( "context" + "crypto/tls" "errors" "fmt" + "net/url" "os" "path/filepath" "strings" @@ -32,34 +34,37 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/ratelimiter" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + "github.com/fluxcd/pkg/cache" "github.com/fluxcd/pkg/runtime/conditions" helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/jitter" "github.com/fluxcd/pkg/runtime/patch" 
"github.com/fluxcd/pkg/runtime/predicates" rreconcile "github.com/fluxcd/pkg/runtime/reconcile" - - eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" + "github.com/fluxcd/pkg/runtime/secrets" "github.com/fluxcd/pkg/sourceignore" sourcev1 "github.com/fluxcd/source-controller/api/v1" - bucketv1 "github.com/fluxcd/source-controller/api/v1beta2" - intdigest "github.com/fluxcd/source-controller/internal/digest" + "github.com/fluxcd/source-controller/internal/bucket/azure" + "github.com/fluxcd/source-controller/internal/bucket/gcp" + "github.com/fluxcd/source-controller/internal/bucket/minio" serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/index" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/internal/reconcile/summarize" - "github.com/fluxcd/source-controller/pkg/azure" - "github.com/fluxcd/source-controller/pkg/gcp" - "github.com/fluxcd/source-controller/pkg/minio" ) // maxConcurrentBucketFetches is the upper bound on the goroutines used to @@ -75,7 +80,7 @@ import ( const maxConcurrentBucketFetches = 100 // bucketReadyCondition contains the information required to summarize a -// v1beta2.Bucket Ready Condition. +// v1.Bucket Ready Condition. 
var bucketReadyCondition = summarize.Conditions{ Target: meta.ReadyCondition, Owned: []string{ @@ -114,21 +119,24 @@ var bucketFailConditions = []string{ // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=serviceaccounts/token,verbs=create -// BucketReconciler reconciles a v1beta2.Bucket object. +// BucketReconciler reconciles a v1.Bucket object. type BucketReconciler struct { client.Client kuberecorder.EventRecorder helper.Metrics - Storage *Storage + Storage *storage.Storage ControllerName string + TokenCache *cache.TokenCache patchOptions []patch.Option } type BucketReconcilerOptions struct { - RateLimiter ratelimiter.RateLimiter + RateLimiter workqueue.TypedRateLimiter[reconcile.Request] } // BucketProvider is an interface for fetching objects from a storage provider @@ -153,10 +161,19 @@ type BucketProvider interface { Close(context.Context) } -// bucketReconcileFunc is the function type for all the v1beta2.Bucket +// bucketCredentials contains all credentials and configuration needed for bucket providers. +type bucketCredentials struct { + secret *corev1.Secret + proxyURL *url.URL + tlsConfig *tls.Config + stsSecret *corev1.Secret + stsTLSConfig *tls.Config +} + +// bucketReconcileFunc is the function type for all the v1.Bucket // (sub)reconcile functions. The type implementations are grouped and // executed serially to perform the complete reconcile of the object. 
-type bucketReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) +type bucketReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{}) @@ -166,7 +183,7 @@ func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts Buc r.patchOptions = getPatchOptions(bucketReadyCondition.Owned, r.ControllerName) return ctrl.NewControllerManagedBy(mgr). - For(&bucketv1.Bucket{}). + For(&sourcev1.Bucket{}). WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})). WithOptions(controller.Options{ RateLimiter: opts.RateLimiter, @@ -179,7 +196,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res log := ctrl.LoggerFrom(ctx) // Fetch the Bucket - obj := &bucketv1.Bucket{} + obj := &sourcev1.Bucket{} if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } @@ -210,9 +227,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res } result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) - // Always record suspend, readiness and duration metrics. - r.Metrics.RecordSuspend(ctx, obj, obj.Spec.Suspend) - r.Metrics.RecordReadiness(ctx, obj) + // Always record duration metrics. r.Metrics.RecordDuration(ctx, obj, start) }() @@ -252,7 +267,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res // reconcile iterates through the bucketReconcileFunc tasks for the // object. It returns early on the first call that returns // reconcile.ResultRequeue, or produces an error. 
-func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) { oldObj := obj.DeepCopy() rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress") @@ -283,7 +298,7 @@ func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatche fmt.Errorf("failed to create temporary working directory: %w", err), sourcev1.DirCreationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } defer func() { @@ -323,7 +338,7 @@ func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatche } // notify emits notification related to the reconciliation. -func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *bucketv1.Bucket, index *index.Digester, res sreconcile.Result, resErr error) { +func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.Bucket, index *index.Digester, res sreconcile.Result, resErr error) { // Notify successful reconciliation for new artifact and recovery from any // failure. if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { @@ -361,7 +376,7 @@ func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *bucketv1. // condition is added. // The hostname of any URL in the Status of the object are updated, to ensure // they match the Storage server hostname of current runtime. 
-func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, _ *index.Digester, _ string) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, _ *index.Digester, _ string) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -399,7 +414,7 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.Seria if artifactMissing { msg += ": disappeared from storage" } - rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, msg) + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) @@ -418,92 +433,62 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.Seria // reconcileSource fetches the upstream bucket contents with the client for the // given object's Provider, and returns the result. // When a SecretRef is defined, it attempts to fetch the Secret before calling -// the provider. If this fails, it records v1beta2.FetchFailedCondition=True on +// the provider. If this fails, it records v1.FetchFailedCondition=True on // the object and returns early. 
-func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) { - secret, err := r.getBucketSecret(ctx, obj) +func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) { + usesObjectLevelWorkloadIdentity := obj.Spec.Provider != "" && obj.Spec.Provider != sourcev1.BucketProviderGeneric && obj.Spec.ServiceAccountName != "" + if usesObjectLevelWorkloadIdentity { + if !auth.IsObjectLevelWorkloadIdentityEnabled() { + const gate = auth.FeatureGateObjectLevelWorkloadIdentity + const msgFmt = "to use spec.serviceAccountName for provider authentication please enable the %s feature gate in the controller" + err := fmt.Errorf(msgFmt, gate) + e := serror.NewStalling(err, meta.FeatureGateDisabledReason) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + } + + creds, err := r.setupCredentials(ctx, obj) if err != nil { e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) - // Return error as the world as observed may change + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - // Construct provider client - var provider BucketProvider - switch obj.Spec.Provider { - case bucketv1.GoogleBucketProvider: - if err = gcp.ValidateSecret(secret); err != nil { - e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) - return sreconcile.ResultEmpty, e - } - if provider, err = gcp.NewClient(ctx, secret); err != nil { - e := serror.NewGeneric(err, "ClientError") - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) - return 
sreconcile.ResultEmpty, e - } - case bucketv1.AzureBucketProvider: - if err = azure.ValidateSecret(secret); err != nil { - e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) - return sreconcile.ResultEmpty, e - } - if provider, err = azure.NewClient(obj, secret); err != nil { - e := serror.NewGeneric(err, "ClientError") - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) - return sreconcile.ResultEmpty, e - } - default: - if err = minio.ValidateSecret(secret); err != nil { + provider, err := r.createBucketProvider(ctx, obj, creds) + if err != nil { + var stallingErr *serror.Stalling + var genericErr *serror.Generic + if errors.As(err, &stallingErr) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, stallingErr.Reason, "%s", stallingErr) + return sreconcile.ResultEmpty, stallingErr + } else if errors.As(err, &genericErr) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, genericErr.Reason, "%s", genericErr) + return sreconcile.ResultEmpty, genericErr + } else { e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) - return sreconcile.ResultEmpty, e - } - if provider, err = minio.NewClient(obj, secret); err != nil { - e := serror.NewGeneric(err, "ClientError") - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } } - - // Fetch etag index - if err = fetchEtagIndex(ctx, provider, obj, index, dir); err != nil { - e := serror.NewGeneric(err, bucketv1.BucketOperationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) + changed, err := r.syncBucketArtifacts(ctx, provider, obj, index, dir) + if err != nil { + e := serror.NewGeneric(err, sourcev1.BucketOperationFailedReason) 
+ conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - // Check if index has changed compared to current Artifact revision. - var changed bool - if artifact := obj.Status.Artifact; artifact != nil && artifact.Revision != "" { - curRev := digest.Digest(artifact.Revision) - changed = curRev.Validate() != nil || curRev != index.Digest(curRev.Algorithm()) - } - - // Fetch the bucket objects if required to. - if artifact := obj.GetArtifact(); artifact == nil || changed { - // Mark observations about the revision on the object - defer func() { - // As fetchIndexFiles can make last-minute modifications to the etag - // index, we need to re-calculate the revision at the end - revision := index.Digest(intdigest.Canonical) - - message := fmt.Sprintf("new upstream revision '%s'", revision) - if obj.GetArtifact() != nil { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) - } - rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) - if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to patch") - return - } - }() - - if err = fetchIndexFiles(ctx, provider, obj, index, dir); err != nil { - e := serror.NewGeneric(err, bucketv1.BucketOperationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error()) - return sreconcile.ResultEmpty, e + // Update artifact status if changes were detected + if changed { + revision := index.Digest(intdigest.Canonical) + message := fmt.Sprintf("new upstream revision '%s'", revision) + if obj.GetArtifact() != nil { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message) + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to patch") + 
return sreconcile.ResultEmpty, err } } @@ -515,12 +500,12 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial // (Status) data on the object does not match the given. // // The inspection of the given data to the object is differed, ensuring any -// stale observations like v1beta2.ArtifactOutdatedCondition are removed. +// stale observations like v1.ArtifactOutdatedCondition are removed. // If the given Artifact does not differ from the object's current, it returns // early. // On a successful archive, the Artifact in the Status of the object is set, // and the symlink in the Storage is updated to its path. -func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) { // Calculate revision revision := index.Digest(intdigest.Canonical) @@ -554,14 +539,14 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.Seri fmt.Errorf("failed to stat source path: %w", err), sourcev1.StatOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } else if !f.IsDir() { e := serror.NewGeneric( fmt.Errorf("source path '%s' is not a directory", dir), sourcev1.InvalidPathReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -571,7 +556,7 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.Seri fmt.Errorf("failed to create artifact directory: %w", err), 
sourcev1.DirCreationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } unlock, err := r.Storage.Lock(artifact) @@ -589,7 +574,7 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.Seri fmt.Errorf("unable to archive artifact to storage: %s", err), sourcev1.ArchiveOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -613,7 +598,7 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.Seri // reconcileDelete handles the deletion of the object. // It first garbage collects all Artifacts for the object from the Storage. // Removing the finalizer from the object if successful. -func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *bucketv1.Bucket) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (sreconcile.Result, error) { // Garbage collect the resource's artifacts if err := r.garbageCollect(ctx, obj); err != nil { // Return the error so we retry the failed garbage collection @@ -623,6 +608,10 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *bucketv1.Bu // Remove our finalizer from the list controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + // Cleanup caches. 
+ r.TokenCache.DeleteEventsForObject(sourcev1.BucketKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile) + // Stop reconciliation as the object is being deleted return sreconcile.ResultEmpty, nil } @@ -632,7 +621,7 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *bucketv1.Bu // It removes all but the current Artifact from the Storage, unless the // deletion timestamp on the object is set. Which will result in the // removal of all Artifacts for the objects. -func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *bucketv1.Bucket) error { +func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error { if !obj.DeletionTimestamp.IsZero() { if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { return serror.NewGeneric( @@ -656,30 +645,13 @@ func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *bucketv1.Buc } if len(delFiles) > 0 { r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) + "garbage collected %d artifacts", len(delFiles)) return nil } } return nil } -// getBucketSecret attempts to fetch the Secret reference if specified on the -// obj. It returns any client error. -func (r *BucketReconciler) getBucketSecret(ctx context.Context, obj *bucketv1.Bucket) (*corev1.Secret, error) { - if obj.Spec.SecretRef == nil { - return nil, nil - } - secretName := types.NamespacedName{ - Namespace: obj.GetNamespace(), - Name: obj.Spec.SecretRef.Name, - } - secret := &corev1.Secret{} - if err := r.Get(ctx, secretName, secret); err != nil { - return nil, fmt.Errorf("failed to get secret '%s': %w", secretName.String(), err) - } - return secret, nil -} - // eventLogf records events, and logs at the same time. 
// // This log is different from the debug log in the EventRecorder, in the sense @@ -710,7 +682,7 @@ func (r *BucketReconciler) annotatedEventLogf(ctx context.Context, // bucket using the given provider, while filtering them using .sourceignore // rules. After fetching an object, the etag value in the index is updated to // the current value to ensure accuracy. -func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *bucketv1.Bucket, index *index.Digester, tempDir string) error { +func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, tempDir string) error { ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() @@ -728,7 +700,7 @@ func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *bucketv1. path := filepath.Join(tempDir, sourceignore.IgnoreFile) if _, err := provider.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { if !provider.ObjectIsNotFound(err) { - return err + return fmt.Errorf("failed to get Etag for '%s' object: %w", sourceignore.IgnoreFile, serror.SanitizeError(err)) } } ps, err := sourceignore.ReadIgnoreFile(path, nil) @@ -764,7 +736,7 @@ func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *bucketv1. // using the given provider, and stores them into tempDir. It downloads in // parallel, but limited to the maxConcurrentBucketFetches. // Given an index is provided, the bucket is assumed to exist. 
-func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *bucketv1.Bucket, index *index.Digester, tempDir string) error { +func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, tempDir string) error { ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() @@ -792,7 +764,7 @@ func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *bucketv1 index.Delete(k) return nil } - return fmt.Errorf("failed to get '%s' object: %w", k, err) + return fmt.Errorf("failed to get '%s' object: %w", k, serror.SanitizeError(err)) } if t != etag { index.Add(k, etag) @@ -808,3 +780,206 @@ func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *bucketv1 return nil } + +// setupCredentials retrieves and validates secrets for authentication, TLS configuration, and proxy settings. +// It returns all credentials needed for bucket providers. +func (r *BucketReconciler) setupCredentials(ctx context.Context, obj *sourcev1.Bucket) (*bucketCredentials, error) { + var secret *corev1.Secret + if obj.Spec.SecretRef != nil { + secretName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.SecretRef.Name, + } + secret = &corev1.Secret{} + if err := r.Get(ctx, secretName, secret); err != nil { + return nil, fmt.Errorf("failed to get secret '%s': %w", secretName, err) + } + } + + var stsSecret *corev1.Secret + if obj.Spec.STS != nil && obj.Spec.STS.SecretRef != nil { + secretName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.STS.SecretRef.Name, + } + stsSecret = &corev1.Secret{} + if err := r.Get(ctx, secretName, stsSecret); err != nil { + return nil, fmt.Errorf("failed to get STS secret '%s': %w", secretName, err) + } + } + + var ( + err error + proxyURL *url.URL + tlsConfig *tls.Config + stsTLSConfig *tls.Config + ) + + if obj.Spec.ProxySecretRef != nil { + secretRef := types.NamespacedName{ + Namespace: 
obj.GetNamespace(), + Name: obj.Spec.ProxySecretRef.Name, + } + proxyURL, err = secrets.ProxyURLFromSecretRef(ctx, r.Client, secretRef) + if err != nil { + return nil, fmt.Errorf("failed to get proxy URL: %w", err) + } + } + + if obj.Spec.CertSecretRef != nil { + secretRef := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.CertSecretRef.Name, + } + tlsConfig, err = secrets.TLSConfigFromSecretRef(ctx, r.Client, secretRef, secrets.WithSystemCertPool()) + if err != nil { + return nil, fmt.Errorf("failed to get TLS config: %w", err) + } + } + + if obj.Spec.STS != nil && obj.Spec.STS.CertSecretRef != nil { + secretRef := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.STS.CertSecretRef.Name, + } + stsTLSConfig, err = secrets.TLSConfigFromSecretRef(ctx, r.Client, secretRef, secrets.WithSystemCertPool()) + if err != nil { + return nil, fmt.Errorf("failed to get STS TLS config: %w", err) + } + } + + return &bucketCredentials{ + secret: secret, + proxyURL: proxyURL, + tlsConfig: tlsConfig, + stsSecret: stsSecret, + stsTLSConfig: stsTLSConfig, + }, nil +} + +// createBucketProvider creates a provider-specific bucket client using the given credentials and configuration. +// It handles different bucket providers (AWS, GCP, Azure, generic) and returns the appropriate client. 
+func (r *BucketReconciler) createBucketProvider(ctx context.Context, obj *sourcev1.Bucket, creds *bucketCredentials) (BucketProvider, error) { + authOpts := []auth.Option{ + auth.WithClient(r.Client), + auth.WithServiceAccountNamespace(obj.GetNamespace()), + } + + if obj.Spec.ServiceAccountName != "" { + authOpts = append(authOpts, auth.WithServiceAccountName(obj.Spec.ServiceAccountName)) + } + + if r.TokenCache != nil { + involvedObject := cache.InvolvedObject{ + Kind: sourcev1.BucketKind, + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + Operation: cache.OperationReconcile, + } + authOpts = append(authOpts, auth.WithCache(*r.TokenCache, involvedObject)) + } + + if creds.proxyURL != nil { + authOpts = append(authOpts, auth.WithProxyURL(*creds.proxyURL)) + } + + if obj.Spec.Region != "" { + authOpts = append(authOpts, auth.WithSTSRegion(obj.Spec.Region)) + } + + if sts := obj.Spec.STS; sts != nil { + authOpts = append(authOpts, auth.WithSTSEndpoint(sts.Endpoint)) + } + + switch obj.Spec.Provider { + case sourcev1.BucketProviderGoogle: + var opts []gcp.Option + if creds.proxyURL != nil { + opts = append(opts, gcp.WithProxyURL(creds.proxyURL)) + } + + if creds.secret != nil { + if err := gcp.ValidateSecret(creds.secret); err != nil { + return nil, err + } + opts = append(opts, gcp.WithSecret(creds.secret)) + } else { + opts = append(opts, gcp.WithAuth(authOpts...)) + } + + return gcp.NewClient(ctx, obj, opts...) + + case sourcev1.BucketProviderAzure: + if err := azure.ValidateSecret(creds.secret); err != nil { + return nil, err + } + var opts []azure.Option + if creds.secret != nil { + opts = append(opts, azure.WithSecret(creds.secret)) + } + if creds.proxyURL != nil { + opts = append(opts, azure.WithProxyURL(creds.proxyURL)) + } + opts = append(opts, azure.WithAuth(authOpts...)) + return azure.NewClient(ctx, obj, opts...) 
+ + default: + if err := minio.ValidateSecret(creds.secret); err != nil { + return nil, err + } + if sts := obj.Spec.STS; sts != nil { + if err := minio.ValidateSTSProvider(obj.Spec.Provider, sts); err != nil { + return nil, serror.NewStalling(err, sourcev1.InvalidSTSConfigurationReason) + } + if _, err := url.Parse(sts.Endpoint); err != nil { + return nil, serror.NewStalling(fmt.Errorf("failed to parse STS endpoint '%s': %w", sts.Endpoint, err), sourcev1.URLInvalidReason) + } + if err := minio.ValidateSTSSecret(sts.Provider, creds.stsSecret); err != nil { + return nil, serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) + } + } + var opts []minio.Option + if creds.secret != nil { + opts = append(opts, minio.WithSecret(creds.secret)) + } else if obj.Spec.Provider == sourcev1.BucketProviderAmazon { + opts = append(opts, minio.WithAuth(authOpts...)) + } + if creds.tlsConfig != nil { + opts = append(opts, minio.WithTLSConfig(creds.tlsConfig)) + } + if creds.proxyURL != nil { + opts = append(opts, minio.WithProxyURL(creds.proxyURL)) + } + if creds.stsSecret != nil { + opts = append(opts, minio.WithSTSSecret(creds.stsSecret)) + } + if creds.stsTLSConfig != nil { + opts = append(opts, minio.WithSTSTLSConfig(creds.stsTLSConfig)) + } + return minio.NewClient(ctx, obj, opts...) + } +} + +// syncBucketArtifacts handles etag index retrieval and bucket object fetching. +// It fetches the etag index from the provider and downloads objects to the specified directory. +// Returns true if changes were detected and artifacts were updated. 
+func (r *BucketReconciler) syncBucketArtifacts(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, dir string) (bool, error) { + if err := fetchEtagIndex(ctx, provider, obj, index, dir); err != nil { + return false, err + } + var changed bool + if artifact := obj.Status.Artifact; artifact != nil && artifact.Revision != "" { + curRev := digest.Digest(artifact.Revision) + changed = curRev.Validate() != nil || curRev != index.Digest(curRev.Algorithm()) + } + + // Fetch the bucket objects if required to. + if artifact := obj.GetArtifact(); artifact == nil || changed { + if err := fetchIndexFiles(ctx, provider, obj, index, dir); err != nil { + return false, err + } + return true, nil + } + + return false, nil +} diff --git a/internal/controller/bucket_controller_fetch_test.go b/internal/controller/bucket_controller_fetch_test.go index b31568ff8..707d645f3 100644 --- a/internal/controller/bucket_controller_fetch_test.go +++ b/internal/controller/bucket_controller_fetch_test.go @@ -24,10 +24,10 @@ import ( "testing" "time" - "gotest.tools/assert" + . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" "github.com/fluxcd/source-controller/internal/index" ) @@ -119,7 +119,8 @@ func Test_fetchEtagIndex(t *testing.T) { t.Fatal(err) } - assert.Equal(t, index.Len(), 3) + g := NewWithT(t) + g.Expect(index.Len()).To(Equal(3)) }) t.Run("an error while bucket does not exist", func(t *testing.T) { @@ -129,7 +130,9 @@ func Test_fetchEtagIndex(t *testing.T) { index := index.NewDigester() err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp) - assert.ErrorContains(t, err, "not found") + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("not found")) }) t.Run("filters with .sourceignore rules", func(t *testing.T) { @@ -153,7 +156,8 @@ func Test_fetchEtagIndex(t *testing.T) { if ok := index.Has("foo.txt"); ok { t.Error(fmt.Errorf("expected 'foo.txt' index item to not exist")) } - assert.Equal(t, index.Len(), 1) + g := NewWithT(t) + g.Expect(index.Len()).To(Equal(1)) }) t.Run("filters with ignore rules from object", func(t *testing.T) { @@ -177,7 +181,8 @@ func Test_fetchEtagIndex(t *testing.T) { t.Error(err) } - assert.Equal(t, index.Len(), 1) + g := NewWithT(t) + g.Expect(index.Len()).To(Equal(1)) if ok := index.Has("foo.txt"); !ok { t.Error(fmt.Errorf("expected 'foo.txt' index item to exist")) } @@ -243,7 +248,8 @@ func Test_fetchFiles(t *testing.T) { t.Fatal(err) } f := index.Get("foo.yaml") - assert.Equal(t, f, "etag2") + g := NewWithT(t) + g.Expect(f).To(Equal("etag2")) }) t.Run("a disappeared index entry is removed from the index", func(t *testing.T) { @@ -262,8 +268,9 @@ func Test_fetchFiles(t *testing.T) { t.Fatal(err) } f := index.Get("foo.yaml") - assert.Equal(t, f, "etag1") - assert.Check(t, !index.Has("bar.yaml")) + g := NewWithT(t) + 
g.Expect(f).To(Equal("etag1")) + g.Expect(index.Has("bar.yaml")).To(BeFalse()) }) t.Run("can fetch more than maxConcurrentFetches", func(t *testing.T) { diff --git a/internal/controller/bucket_controller_test.go b/internal/controller/bucket_controller_test.go index 2dd23dd20..00ed46cb7 100644 --- a/internal/controller/bucket_controller_test.go +++ b/internal/controller/bucket_controller_test.go @@ -38,14 +38,15 @@ import ( kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" "github.com/fluxcd/pkg/runtime/conditions" conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" "github.com/fluxcd/pkg/runtime/jitter" "github.com/fluxcd/pkg/runtime/patch" sourcev1 "github.com/fluxcd/source-controller/api/v1" - bucketv1 "github.com/fluxcd/source-controller/api/v1beta2" - intdigest "github.com/fluxcd/source-controller/internal/digest" "github.com/fluxcd/source-controller/internal/index" gcsmock "github.com/fluxcd/source-controller/internal/mock/gcs" s3mock "github.com/fluxcd/source-controller/internal/mock/s3" @@ -68,10 +69,10 @@ func TestBucketReconciler_deleteBeforeFinalizer(t *testing.T) { g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) }) - bucket := &bucketv1.Bucket{} + bucket := &sourcev1.Bucket{} bucket.Name = "test-bucket" bucket.Namespace = namespaceName - bucket.Spec = bucketv1.BucketSpec{ + bucket.Spec = sourcev1.BucketSpec{ Interval: metav1.Duration{Duration: interval}, BucketName: "foo", Endpoint: "bar", @@ -124,12 +125,12 @@ func TestBucketReconciler_Reconcile(t *testing.T) { g.Expect(testEnv.Create(ctx, secret)).To(Succeed()) defer testEnv.Delete(ctx, secret) - 
origObj := &bucketv1.Bucket{ + origObj := &sourcev1.Bucket{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "bucket-reconcile-", Namespace: "default", }, - Spec: bucketv1.BucketSpec{ + Spec: sourcev1.BucketSpec{ Provider: "generic", BucketName: s3Server.BucketName, Endpoint: u.Host, @@ -197,20 +198,20 @@ func TestBucketReconciler_Reconcile(t *testing.T) { func TestBucketReconciler_reconcileStorage(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *bucketv1.Bucket, storage *Storage) error + beforeFunc func(obj *sourcev1.Bucket, storage *storage.Storage) error want sreconcile.Result wantErr bool - assertArtifact *sourcev1.Artifact + assertArtifact *meta.Artifact assertConditions []metav1.Condition assertPaths []string }{ { name: "garbage collects", - beforeFunc: func(obj *bucketv1.Bucket, storage *Storage) error { + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { revisions := []string{"a", "b", "c", "d"} for n := range revisions { v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), Revision: v, } @@ -228,7 +229,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") return nil }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/d.txt", Revision: "d", Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", @@ -256,8 +257,8 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { }, { name: "notices missing artifact in storage", - beforeFunc: func(obj *bucketv1.Bucket, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/invalid.txt", Revision: "d", } @@ -275,10 +276,10 @@ func TestBucketReconciler_reconcileStorage(t 
*testing.T) { }, { name: "notices empty artifact digest", - beforeFunc: func(obj *bucketv1.Bucket, storage *Storage) error { + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { f := "empty-digest.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -306,10 +307,10 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { }, { name: "notices artifact digest mismatch", - beforeFunc: func(obj *bucketv1.Bucket, storage *Storage) error { + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { f := "digest-mismatch.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -337,8 +338,8 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { }, { name: "updates hostname on diff from current", - beforeFunc: func(obj *bucketv1.Bucket, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -357,7 +358,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { assertPaths: []string{ "/reconcile-storage/hostname.txt", }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -380,14 +381,14 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { r := &BucketReconciler{ Client: fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&bucketv1.Bucket{}). + WithStatusSubresource(&sourcev1.Bucket{}). 
Build(), EventRecorder: record.NewFakeRecorder(32), Storage: testStorage, patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), } - obj := &bucketv1.Bucket{ + obj := &sourcev1.Bucket{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-", Generation: 1, @@ -438,14 +439,15 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { bucketObjects []*s3mock.Object middleware http.Handler secret *corev1.Secret - beforeFunc func(obj *bucketv1.Bucket) + serviceAccount *corev1.ServiceAccount + beforeFunc func(obj *sourcev1.Bucket) want sreconcile.Result wantErr bool assertIndex *index.Digester assertConditions []metav1.Condition }{ { - name: "Reconciles GCS source", + name: "Reconciles generic source", bucketName: "dummy", bucketObjects: []*s3mock.Object{ { @@ -472,7 +474,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "Observes non-existing secretRef", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.SecretRef = &meta.LocalObjectReference{ Name: "dummy", } @@ -495,7 +497,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { Name: "dummy", }, }, - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.SecretRef = &meta.LocalObjectReference{ Name: "dummy", } @@ -510,10 +512,181 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, }, + { + name: "Observes non-existing certSecretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, 
sourcev1.AuthenticationFailedReason, "failed to get TLS config: secret '/dummy' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid certSecretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get TLS config: secret '/dummy' must contain either 'ca.crt' or both 'tls.crt' and 'tls.key'"), + }, + }, + { + name: "Observes non-existing proxySecretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid proxySecretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: "dummy", + }, + Data: map[string][]byte{}, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy': key 'address' not found"), + }, + }, + { + name: "Observes non-existing sts.secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + SecretRef: &meta.LocalObjectReference{Name: "dummy"}, + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get STS secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid sts.secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Provider = "generic" + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "https://something", + SecretRef: &meta.LocalObjectReference{Name: "dummy"}, + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, 
meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "invalid 'dummy' secret data for 'ldap' STS provider: required fields username, password"), + }, + }, + { + name: "Observes non-existing sts.certSecretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + CertSecretRef: &meta.LocalObjectReference{Name: "dummy"}, + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get STS TLS config: secret '/dummy' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid sts.certSecretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Provider = "generic" + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "https://something", + CertSecretRef: &meta.LocalObjectReference{Name: "dummy"}, + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + 
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get STS TLS config: secret '/dummy' must contain either 'ca.crt' or both 'tls.crt' and 'tls.key'"), + }, + }, { name: "Observes non-existing bucket name", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.BucketName = "invalid" conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") @@ -521,14 +694,53 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { wantErr: true, assertIndex: index.NewDigester(), assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "bucket 'invalid' not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes incompatible sts.provider", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Provider = "generic" + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + Provider: "aws", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.InvalidSTSConfigurationReason, "STS provider 'aws' is not supported for 'generic' bucket provider"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid 
sts.endpoint", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Provider = "generic" + obj.Spec.STS = &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "something\t", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "failed to parse STS endpoint 'something\t': parse \"something\\t\": net/url: invalid control character in URL"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, }, { name: "Transient bucket name API failure", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.Endpoint = "transient.example.com" obj.Spec.BucketName = "unavailable" conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -537,7 +749,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { wantErr: true, assertIndex: index.NewDigester(), assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, @@ -577,7 +789,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "spec.ignore overrides .sourceignore", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { ignore := "!ignored/file.txt" obj.Spec.Ignore = &ignore }, @@ 
-614,8 +826,8 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "Up-to-date artifact", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{ Revision: "sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479", } conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -641,8 +853,8 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "Removes FetchFailedCondition after reconciling source", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "failed to read test file") + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") }, bucketObjects: []*s3mock.Object{ { @@ -672,8 +884,8 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { LastModified: time.Now(), }, }, - beforeFunc: func(obj *bucketv1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{ Path: "some-path", Revision: "some-rev", } @@ -695,12 +907,16 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.Scheme()). 
- WithStatusSubresource(&bucketv1.Bucket{}) + WithStatusSubresource(&sourcev1.Bucket{}) if tt.secret != nil { clientBuilder.WithObjects(tt.secret) } + if tt.serviceAccount != nil { + clientBuilder.WithObjects(tt.serviceAccount) + } + r := &BucketReconciler{ EventRecorder: record.NewFakeRecorder(32), Client: clientBuilder.Build(), @@ -709,12 +925,12 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { } tmpDir := t.TempDir() - obj := &bucketv1.Bucket{ + obj := &sourcev1.Bucket{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-bucket-", Generation: 1, }, - Spec: bucketv1.BucketSpec{ + Spec: sourcev1.BucketSpec{ Timeout: &metav1.Duration{Duration: timeout}, }, } @@ -763,15 +979,17 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { tests := []struct { - name string - bucketName string - bucketObjects []*gcsmock.Object - secret *corev1.Secret - beforeFunc func(obj *bucketv1.Bucket) - want sreconcile.Result - wantErr bool - assertIndex *index.Digester - assertConditions []metav1.Condition + name string + bucketName string + bucketObjects []*gcsmock.Object + secret *corev1.Secret + serviceAccount *corev1.ServiceAccount + beforeFunc func(obj *sourcev1.Bucket) + want sreconcile.Result + wantErr bool + assertIndex *index.Digester + assertConditions []metav1.Condition + disableObjectLevelWorkloadIdentity bool }{ { name: "Reconciles GCS source", @@ -794,7 +1012,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { "serviceaccount": []byte("testsa"), }, }, - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.SecretRef = &meta.LocalObjectReference{ Name: "dummy", } @@ -811,7 +1029,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { { name: "Observes non-existing secretRef", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.SecretRef = 
&meta.LocalObjectReference{ Name: "dummy", } @@ -835,7 +1053,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { Name: "dummy", }, }, - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.SecretRef = &meta.LocalObjectReference{ Name: "dummy", } @@ -851,10 +1069,53 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, }, + { + name: "Observes non-existing proxySecretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, + { + name: "Observes invalid proxySecretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy': key 'address' not found"), + *conditions.TrueCondition(meta.ReconcilingCondition, 
meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + }, { name: "Observes non-existing bucket name", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.BucketName = "invalid" conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") @@ -863,14 +1124,14 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { wantErr: true, assertIndex: index.NewDigester(), assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "bucket 'invalid' not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' not found"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, }, { name: "Transient bucket name API failure", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.Endpoint = "transient.example.com" obj.Spec.BucketName = "unavailable" conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -880,7 +1141,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { wantErr: true, assertIndex: index.NewDigester(), assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, @@ -920,7 +1181,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { { name: 
"spec.ignore overrides .sourceignore", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { ignore := "!ignored/file.txt" obj.Spec.Ignore = &ignore }, @@ -957,8 +1218,8 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { { name: "Up-to-date artifact", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{ Revision: "sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479", } conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -984,8 +1245,8 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { { name: "Removes FetchFailedCondition after reconciling source", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "failed to read test file") + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") }, bucketObjects: []*gcsmock.Object{ { @@ -1015,8 +1276,8 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { Generation: 3, }, }, - beforeFunc: func(obj *bucketv1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{ Path: "some-path", Revision: "some-rev", } @@ -1031,6 +1292,80 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), }, }, + { + name: "GCS Object-Level Workload Identity (no secret)", + bucketName: "dummy", + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: 
[]byte("test"), + Generation: 3, + }, + }, + serviceAccount: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ServiceAccountName = "test-sa" + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "GCS Controller-Level Workload Identity (no secret, no SA)", + bucketName: "dummy", + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + Generation: 3, + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + // ServiceAccountName is intentionally left unset (Controller-Level Workload Identity). + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "GCS Object-Level fails when feature gate disabled", + bucketName: "dummy", + serviceAccount: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + }, + }, + beforeFunc: func(obj 
*sourcev1.Bucket) { + obj.Spec.ServiceAccountName = "test-sa" + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FeatureGateDisabledReason, "to use spec.serviceAccountName for provider authentication please enable the ObjectLevelWorkloadIdentity feature gate in the controller"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + disableObjectLevelWorkloadIdentity: true, + }, // TODO: Middleware for mock server to test authentication using secret. } for _, tt := range tests { @@ -1039,30 +1374,41 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.Scheme()). - WithStatusSubresource(&bucketv1.Bucket{}) + WithStatusSubresource(&sourcev1.Bucket{}) if tt.secret != nil { clientBuilder.WithObjects(tt.secret) } + if tt.serviceAccount != nil { + clientBuilder.WithObjects(tt.serviceAccount) + } + r := &BucketReconciler{ EventRecorder: record.NewFakeRecorder(32), Client: clientBuilder.Build(), Storage: testStorage, patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), } + + // Handle ObjectLevelWorkloadIdentity feature gate + if !tt.disableObjectLevelWorkloadIdentity { + auth.EnableObjectLevelWorkloadIdentity() + t.Cleanup(auth.DisableObjectLevelWorkloadIdentity) + } + tmpDir := t.TempDir() // Test bucket object. 
- obj := &bucketv1.Bucket{ + obj := &sourcev1.Bucket{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-bucket-", Generation: 1, }, - Spec: bucketv1.BucketSpec{ + Spec: sourcev1.BucketSpec{ BucketName: tt.bucketName, Timeout: &metav1.Duration{Duration: timeout}, - Provider: bucketv1.GoogleBucketProvider, + Provider: "gcp", }, } @@ -1096,7 +1442,11 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { sp := patch.NewSerialPatcher(obj, r.Client) got, err := r.reconcileSource(context.TODO(), sp, obj, index, tmpDir) - g.Expect(err != nil).To(Equal(tt.wantErr)) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } g.Expect(got).To(Equal(tt.want)) g.Expect(index.Index()).To(Equal(tt.assertIndex.Index())) @@ -1112,15 +1462,15 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { func TestBucketReconciler_reconcileArtifact(t *testing.T) { tests := []struct { name string - beforeFunc func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) - afterFunc func(t *WithT, obj *bucketv1.Bucket, dir string) + beforeFunc func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) + afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string) want sreconcile.Result wantErr bool assertConditions []metav1.Condition }{ { name: "Archiving artifact to storage makes ArtifactInStorage=True", - beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) { + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { obj.Spec.Interval = metav1.Duration{Duration: interval} conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") @@ -1134,15 +1484,15 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { }, { name: "Up-to-date artifact should not persist and update status", - beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) { + 
beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { revision := index.Digest(intdigest.Canonical) obj.Spec.Interval = metav1.Duration{Duration: interval} // Incomplete artifact - obj.Status.Artifact = &sourcev1.Artifact{Revision: revision.String()} + obj.Status.Artifact = &meta.Artifact{Revision: revision.String()} conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") }, - afterFunc: func(t *WithT, obj *bucketv1.Bucket, dir string) { + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { // Still incomplete t.Expect(obj.Status.URL).To(BeEmpty()) }, @@ -1155,7 +1505,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { }, { name: "Removes ArtifactOutdatedCondition after creating a new artifact", - beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) { + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { obj.Spec.Interval = metav1.Duration{Duration: interval} conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -1170,12 +1520,12 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { }, { name: "Creates latest symlink to the created artifact", - beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) { + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { obj.Spec.Interval = metav1.Duration{Duration: interval} conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") }, - afterFunc: func(t *WithT, obj *bucketv1.Bucket, dir string) { + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { localPath := testStorage.LocalPath(*obj.GetArtifact()) symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") targetFile, err := 
os.Readlink(symlinkPath) @@ -1191,7 +1541,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { }, { name: "Dir path deleted", - beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) { + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") @@ -1206,7 +1556,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { }, { name: "Dir path is not a directory", - beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) { + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { // Remove the given directory and create a file for the same // path. t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) @@ -1216,7 +1566,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") }, - afterFunc: func(t *WithT, obj *bucketv1.Bucket, dir string) { + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) }, want: sreconcile.ResultEmpty, @@ -1235,7 +1585,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). 
- WithStatusSubresource(&bucketv1.Bucket{}) + WithStatusSubresource(&sourcev1.Bucket{}) r := &BucketReconciler{ Client: clientBuilder.Build(), @@ -1244,13 +1594,12 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), } - obj := &bucketv1.Bucket{ + obj := &sourcev1.Bucket{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-bucket-", Generation: 1, - Namespace: "default", }, - Spec: bucketv1.BucketSpec{ + Spec: sourcev1.BucketSpec{ Timeout: &metav1.Duration{Duration: timeout}, }, } @@ -1291,13 +1640,13 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { func TestBucketReconciler_statusConditions(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *bucketv1.Bucket) + beforeFunc func(obj *sourcev1.Bucket) assertConditions []metav1.Condition wantErr bool }{ { name: "positive conditions only", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") }, assertConditions: []metav1.Condition{ @@ -1307,7 +1656,7 @@ func TestBucketReconciler_statusConditions(t *testing.T) { }, { name: "multiple failures", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error") @@ -1322,7 +1671,7 @@ func TestBucketReconciler_statusConditions(t *testing.T) { }, { name: "mixed positive and negative conditions", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored 
artifact for revision") conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") }, @@ -1339,10 +1688,10 @@ func TestBucketReconciler_statusConditions(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - obj := &bucketv1.Bucket{ + obj := &sourcev1.Bucket{ TypeMeta: metav1.TypeMeta{ - APIVersion: bucketv1.GroupVersion.String(), - Kind: bucketv1.BucketKind, + APIVersion: sourcev1.GroupVersion.String(), + Kind: sourcev1.BucketKind, }, ObjectMeta: metav1.ObjectMeta{ Name: "test-bucket", @@ -1353,7 +1702,7 @@ func TestBucketReconciler_statusConditions(t *testing.T) { c := fakeclient.NewClientBuilder(). WithScheme(testEnv.Scheme()). WithObjects(obj). - WithStatusSubresource(&bucketv1.Bucket{}). + WithStatusSubresource(&sourcev1.Bucket{}). Build() serialPatcher := patch.NewSerialPatcher(obj, c) @@ -1388,8 +1737,8 @@ func TestBucketReconciler_notify(t *testing.T) { name string res sreconcile.Result resErr error - oldObjBeforeFunc func(obj *bucketv1.Bucket) - newObjBeforeFunc func(obj *bucketv1.Bucket) + oldObjBeforeFunc func(obj *sourcev1.Bucket) + newObjBeforeFunc func(obj *sourcev1.Bucket) wantEvent string }{ { @@ -1401,8 +1750,8 @@ func TestBucketReconciler_notify(t *testing.T) { name: "new artifact", res: sreconcile.ResultSuccess, resErr: nil, - newObjBeforeFunc: func(obj *bucketv1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + newObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} }, wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from", }, @@ -1410,13 +1759,13 @@ func TestBucketReconciler_notify(t *testing.T) { name: "recovery from failure", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *bucketv1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + oldObjBeforeFunc: func(obj *sourcev1.Bucket) { + 
obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, - newObjBeforeFunc: func(obj *bucketv1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + newObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal Succeeded stored artifact with 2 fetched files from", @@ -1425,13 +1774,13 @@ func TestBucketReconciler_notify(t *testing.T) { name: "recovery and new artifact", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *bucketv1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + oldObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, - newObjBeforeFunc: func(obj *bucketv1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Digest: "bbb"} + newObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from", @@ -1440,12 +1789,12 @@ func TestBucketReconciler_notify(t *testing.T) { name: "no updates", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *bucketv1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + oldObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} 
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, - newObjBeforeFunc: func(obj *bucketv1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + newObjBeforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, }, @@ -1457,8 +1806,8 @@ func TestBucketReconciler_notify(t *testing.T) { recorder := record.NewFakeRecorder(32) - oldObj := &bucketv1.Bucket{ - Spec: bucketv1.BucketSpec{ + oldObj := &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ BucketName: "test-bucket", }, } @@ -1495,3 +1844,164 @@ func TestBucketReconciler_notify(t *testing.T) { }) } } + +func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { + tests := []struct { + name string + bucketProvider string + stsConfig *sourcev1.BucketSTSSpec + err string + }{ + { + name: "gcp unsupported", + bucketProvider: "gcp", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "http://test", + }, + err: "STS configuration is only supported for the 'aws' and 'generic' Bucket providers", + }, + { + name: "azure unsupported", + bucketProvider: "azure", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "http://test", + }, + err: "STS configuration is only supported for the 'aws' and 'generic' Bucket providers", + }, + { + name: "aws supported", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "http://test", + }, + }, + { + name: "invalid endpoint", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "test", + }, + err: "spec.sts.endpoint in body should match '^(http|https)://.*$'", + }, + { + name: "gcp can be created without STS config", + bucketProvider: "gcp", + }, + { + name: "azure can be created without STS config", + bucketProvider: "azure", + }, + { + name: "generic can be created without STS 
config", + bucketProvider: "generic", + }, + { + name: "aws can be created without STS config", + bucketProvider: "aws", + }, + { + name: "ldap unsupported for aws", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "http://test", + }, + err: "'aws' is the only supported STS provider for the 'aws' Bucket provider", + }, + { + name: "aws unsupported for generic", + bucketProvider: "generic", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "http://test", + }, + err: "'ldap' is the only supported STS provider for the 'generic' Bucket provider", + }, + { + name: "aws does not require a secret", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "http://test", + SecretRef: &meta.LocalObjectReference{}, + }, + err: "spec.sts.secretRef is not required for the 'aws' STS provider", + }, + { + name: "aws does not require a cert secret", + bucketProvider: "aws", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "aws", + Endpoint: "http://test", + CertSecretRef: &meta.LocalObjectReference{}, + }, + err: "spec.sts.certSecretRef is not required for the 'aws' STS provider", + }, + { + name: "ldap may use a secret", + bucketProvider: "generic", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "http://test", + SecretRef: &meta.LocalObjectReference{}, + }, + }, + { + name: "ldap may use a cert secret", + bucketProvider: "generic", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "http://test", + CertSecretRef: &meta.LocalObjectReference{}, + }, + }, + { + name: "ldap may not use a secret or cert secret", + bucketProvider: "generic", + stsConfig: &sourcev1.BucketSTSSpec{ + Provider: "ldap", + Endpoint: "http://test", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: 
"default", + }, + Spec: sourcev1.BucketSpec{ + Provider: tt.bucketProvider, + BucketName: "test", + Endpoint: "test", + Suspend: true, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + STS: tt.stsConfig, + }, + } + + err := testEnv.Create(ctx, obj) + if err == nil { + defer func() { + err := testEnv.Delete(ctx, obj) + g.Expect(err).NotTo(HaveOccurred()) + }() + } + + if tt.err != "" { + g.Expect(err.Error()).To(ContainSubstring(tt.err)) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + }) + } +} diff --git a/internal/controller/gitrepository_controller.go b/internal/controller/gitrepository_controller.go index 2440904a2..1208c8ae0 100644 --- a/internal/controller/gitrepository_controller.go +++ b/internal/controller/gitrepository_controller.go @@ -27,12 +27,17 @@ import ( "time" securejoin "github.com/cyphar/filepath-securejoin" + "github.com/fluxcd/pkg/auth" + authutils "github.com/fluxcd/pkg/auth/utils" + "github.com/fluxcd/pkg/git/github" "github.com/fluxcd/pkg/runtime/logger" + "github.com/fluxcd/pkg/runtime/secrets" "github.com/go-git/go-git/v5/plumbing/transport" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -40,10 +45,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/ratelimiter" + "sigs.k8s.io/controller-runtime/pkg/reconcile" eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" + 
"github.com/fluxcd/pkg/cache" "github.com/fluxcd/pkg/git" "github.com/fluxcd/pkg/git/gogit" "github.com/fluxcd/pkg/git/repository" @@ -53,7 +60,6 @@ import ( "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" rreconcile "github.com/fluxcd/pkg/runtime/reconcile" - "github.com/fluxcd/pkg/sourceignore" sourcev1 "github.com/fluxcd/source-controller/api/v1" @@ -65,7 +71,7 @@ import ( ) // gitRepositoryReadyCondition contains the information required to summarize a -// v1beta2.GitRepository Ready Condition. +// v1.GitRepository Ready Condition. var gitRepositoryReadyCondition = summarize.Conditions{ Target: meta.ReadyCondition, Owned: []string{ @@ -120,14 +126,15 @@ func getPatchOptions(ownedConditions []string, controllerName string) []patch.Op // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/finalizers,verbs=get;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=events,verbs=create;patch -// GitRepositoryReconciler reconciles a v1beta2.GitRepository object. +// GitRepositoryReconciler reconciles a v1.GitRepository object. type GitRepositoryReconciler struct { client.Client kuberecorder.EventRecorder helper.Metrics - Storage *Storage + Storage *storage.Storage ControllerName string + TokenCache *cache.TokenCache requeueDependency time.Duration features map[string]bool @@ -137,11 +144,11 @@ type GitRepositoryReconciler struct { type GitRepositoryReconcilerOptions struct { DependencyRequeueInterval time.Duration - RateLimiter ratelimiter.RateLimiter + RateLimiter workqueue.TypedRateLimiter[reconcile.Request] } // gitRepositoryReconcileFunc is the function type for all the -// v1beta2.GitRepository (sub)reconcile functions. +// v1.GitRepository (sub)reconcile functions. 
type gitRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) func (r *GitRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { @@ -204,9 +211,7 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques } result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) - // Always record suspend, readiness and duration metrics. - r.Metrics.RecordSuspend(ctx, obj, obj.Spec.Suspend) - r.Metrics.RecordReadiness(ctx, obj) + // Always record duration metrics. r.Metrics.RecordDuration(ctx, obj, start) }() @@ -279,7 +284,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seria fmt.Errorf("failed to create temporary working directory: %w", err), sourcev1.DirCreationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } defer func() { @@ -422,7 +427,7 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patc if artifactMissing { msg += ": disappeared from storage" } - rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, msg) + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) @@ -443,23 +448,23 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patc // // The included repositories are fetched and their metadata are stored. In case // one of the included repositories isn't ready, it records -// v1beta2.IncludeUnavailableCondition=True and returns early. 
When all the +// v1.IncludeUnavailableCondition=True and returns early. When all the // included repositories are ready, it removes -// v1beta2.IncludeUnavailableCondition from the object. +// v1.IncludeUnavailableCondition from the object. // When the included artifactSet differs from the current set in the Status of -// the object, it marks the object with v1beta2.ArtifactOutdatedCondition=True. +// the object, it marks the object with v1.ArtifactOutdatedCondition=True. // The repository is cloned to the given dir, using the specified configuration // to check out the reference. In case of an error during this process -// (including transient errors), it records v1beta2.FetchFailedCondition=True +// (including transient errors), it records v1.FetchFailedCondition=True // and returns early. -// On a successful checkout, it removes v1beta2.FetchFailedCondition and +// On a successful checkout, it removes v1.FetchFailedCondition and // compares the current revision of HEAD to the revision of the Artifact in the -// Status of the object. It records v1beta2.ArtifactOutdatedCondition=True when +// Status of the object. It records v1.ArtifactOutdatedCondition=True when // they differ. // If specified, the signature of the Git commit is verified. If the signature // can not be verified or the verification fails, it records -// v1beta2.SourceVerifiedCondition=False and returns early. When successful, -// it records v1beta2.SourceVerifiedCondition=True. +// v1.SourceVerifiedCondition=False and returns early. When successful, +// it records v1.SourceVerifiedCondition=True. // When all the above is successful, the given Commit pointer is set to the // commit of the checked out Git repository. 
// @@ -478,18 +483,24 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch } var proxyOpts *transport.ProxyOptions + var proxyURL *url.URL if obj.Spec.ProxySecretRef != nil { var err error - proxyOpts, err = r.getProxyOpts(ctx, obj.Spec.ProxySecretRef.Name, obj.GetNamespace()) + secretRef := types.NamespacedName{ + Name: obj.Spec.ProxySecretRef.Name, + Namespace: obj.GetNamespace(), + } + proxyURL, err = secrets.ProxyURLFromSecretRef(ctx, r.Client, secretRef) if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to configure proxy options: %w", err), sourcev1.AuthenticationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) // Return error as the world as observed may change return sreconcile.ResultEmpty, e } + proxyOpts = &transport.ProxyOptions{URL: proxyURL.String()} } u, err := url.Parse(obj.Spec.URL) @@ -498,19 +509,14 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch fmt.Errorf("failed to parse url '%s': %w", obj.Spec.URL, err), sourcev1.URLInvalidReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - authOpts, err := r.getAuthOpts(ctx, obj, *u) + authOpts, err := r.getAuthOpts(ctx, obj, *u, proxyURL) if err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to configure authentication options: %w", err), - sourcev1.AuthenticationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) // Return error as the world as observed may change - return sreconcile.ResultEmpty, e + return sreconcile.ResultEmpty, err } // Fetch the included artifact metadata. 
@@ -523,7 +529,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch if artifacts.Diff(obj.Status.IncludedArtifacts) { message := "included artifacts differ from last observed includes" if obj.Status.IncludedArtifacts != nil { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", message) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", "%s", message) } rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { @@ -544,7 +550,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch fmt.Errorf("git repository is empty"), "EmptyGitRepository", ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } // Assign the commit to the shared commit reference. @@ -588,6 +594,16 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch ctrl.LoggerFrom(ctx).V(logger.DebugLevel).Info("git repository checked out", "url", obj.Spec.URL, "revision", commitReference(obj, commit)) conditions.Delete(obj, sourcev1.FetchFailedCondition) + // Validate sparse checkout paths after successful checkout. 
+ if err := r.validateSparseCheckoutPaths(ctx, obj, dir); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to sparse checkout directories : %w", err), + sourcev1.GitOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + // Verify commit signature if result, err := r.verifySignature(ctx, obj, *commit); err != nil || result == sreconcile.ResultEmpty { return result, err @@ -597,7 +613,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch if !obj.GetArtifact().HasRevision(commitReference(obj, commit)) { message := fmt.Sprintf("new upstream revision '%s'", commitReference(obj, commit)) if obj.GetArtifact() != nil { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message) } rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { @@ -607,65 +623,177 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch return sreconcile.ResultSuccess, nil } -// getProxyOpts fetches the secret containing the proxy settings, constructs a -// transport.ProxyOptions object using those settings and then returns it. 
-func (r *GitRepositoryReconciler) getProxyOpts(ctx context.Context, proxySecretName, - proxySecretNamespace string) (*transport.ProxyOptions, error) { - proxyData, err := r.getSecretData(ctx, proxySecretName, proxySecretNamespace) - if err != nil { - return nil, fmt.Errorf("failed to get proxy secret '%s/%s': %w", proxySecretNamespace, proxySecretName, err) - } - address, ok := proxyData["address"] - if !ok { - return nil, fmt.Errorf("invalid proxy secret '%s/%s': key 'address' is missing", proxySecretNamespace, proxySecretName) - } - - proxyOpts := &transport.ProxyOptions{ - URL: string(address), - Username: string(proxyData["username"]), - Password: string(proxyData["password"]), - } - return proxyOpts, nil -} - // getAuthOpts fetches the secret containing the auth options (if specified), // constructs a git.AuthOptions object using those options along with the provided // URL and returns it. -func (r *GitRepositoryReconciler) getAuthOpts(ctx context.Context, obj *sourcev1.GitRepository, u url.URL) (*git.AuthOptions, error) { +func (r *GitRepositoryReconciler) getAuthOpts(ctx context.Context, obj *sourcev1.GitRepository, + u url.URL, proxyURL *url.URL) (*git.AuthOptions, error) { + var secret *corev1.Secret var authData map[string][]byte if obj.Spec.SecretRef != nil { var err error - authData, err = r.getSecretData(ctx, obj.Spec.SecretRef.Name, obj.GetNamespace()) + secret, err = r.getSecret(ctx, obj.Spec.SecretRef.Name, obj.GetNamespace()) if err != nil { - return nil, fmt.Errorf("failed to get secret '%s/%s': %w", obj.GetNamespace(), obj.Spec.SecretRef.Name, err) + e := serror.NewGeneric( + fmt.Errorf("failed to get secret '%s/%s': %w", obj.GetNamespace(), obj.Spec.SecretRef.Name, err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e } + authData = secret.Data } // Configure authentication strategy to access the source - authOpts, err := git.NewAuthOptions(u, authData) + 
opts, err := git.NewAuthOptions(u, authData) if err != nil { - return nil, err + e := serror.NewGeneric( + fmt.Errorf("failed to configure authentication options: %w", err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + + // Configure provider authentication if specified. + var getCreds func() (*authutils.GitCredentials, error) + switch provider := obj.GetProvider(); provider { + case sourcev1.GitProviderAzure: // If AWS or GCP are added in the future they can be added here separated by a comma. + getCreds = func() (*authutils.GitCredentials, error) { + opts := []auth.Option{ + auth.WithClient(r.Client), + auth.WithServiceAccountNamespace(obj.GetNamespace()), + } + + if obj.Spec.ServiceAccountName != "" { + // Check object-level workload identity feature gate. + if !auth.IsObjectLevelWorkloadIdentityEnabled() { + const gate = auth.FeatureGateObjectLevelWorkloadIdentity + const msgFmt = "to use spec.serviceAccountName for provider authentication please enable the %s feature gate in the controller" + err := serror.NewStalling(fmt.Errorf(msgFmt, gate), meta.FeatureGateDisabledReason) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FeatureGateDisabledReason, "%s", err) + return nil, err + } + // Set ServiceAccountName only if explicitly specified + opts = append(opts, auth.WithServiceAccountName(obj.Spec.ServiceAccountName)) + } + + if r.TokenCache != nil { + involvedObject := cache.InvolvedObject{ + Kind: sourcev1.GitRepositoryKind, + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + Operation: cache.OperationReconcile, + } + opts = append(opts, auth.WithCache(*r.TokenCache, involvedObject)) + } + + if proxyURL != nil { + opts = append(opts, auth.WithProxyURL(*proxyURL)) + } + + return authutils.GetGitCredentials(ctx, provider, opts...) 
+ } + case sourcev1.GitProviderGitHub: + // if provider is github, but secret ref is not specified + if obj.Spec.SecretRef == nil { + e := serror.NewStalling( + fmt.Errorf("secretRef with github app data must be specified when provider is set to github"), + sourcev1.InvalidProviderConfigurationReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + authMethods, err := secrets.AuthMethodsFromSecret(ctx, secret, secrets.WithTLSSystemCertPool()) + if err != nil { + return nil, err + } + if !authMethods.HasGitHubAppData() { + e := serror.NewGeneric( + fmt.Errorf("secretRef with github app data must be specified when provider is set to github"), + sourcev1.InvalidProviderConfigurationReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + getCreds = func() (*authutils.GitCredentials, error) { + var appOpts []github.OptFunc + + appOpts = append(appOpts, github.WithAppData(authMethods.GitHubAppData)) + + if proxyURL != nil { + appOpts = append(appOpts, github.WithProxyURL(proxyURL)) + } + + if r.TokenCache != nil { + appOpts = append(appOpts, github.WithCache(r.TokenCache, sourcev1.GitRepositoryKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile)) + } + + if authMethods.HasTLS() { + appOpts = append(appOpts, github.WithTLSConfig(authMethods.TLS)) + } + + username, password, err := github.GetCredentials(ctx, appOpts...) + if err != nil { + return nil, err + } + return &authutils.GitCredentials{ + Username: username, + Password: password, + }, nil + } + default: + // analyze secret, if it has github app data, perhaps provider should have been github. 
+ if appID := authData[github.KeyAppID]; len(appID) != 0 { + e := serror.NewGeneric( + fmt.Errorf("secretRef '%s/%s' has github app data but provider is not set to github", obj.GetNamespace(), obj.Spec.SecretRef.Name), + sourcev1.InvalidProviderConfigurationReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + } + if getCreds != nil { + creds, err := getCreds() + if err != nil { + // Check if it's already a structured error and preserve it + switch err.(type) { + case *serror.Stalling, *serror.Generic: + return nil, err + } + + e := serror.NewGeneric( + fmt.Errorf("failed to configure authentication options: %w", err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + opts.BearerToken = creds.BearerToken + opts.Username = creds.Username + opts.Password = creds.Password } - return authOpts, nil + return opts, nil } -func (r *GitRepositoryReconciler) getSecretData(ctx context.Context, name, namespace string) (map[string][]byte, error) { +func (r *GitRepositoryReconciler) getSecret(ctx context.Context, name, namespace string) (*corev1.Secret, error) { key := types.NamespacedName{ Namespace: namespace, Name: name, } - var secret corev1.Secret - if err := r.Client.Get(ctx, key, &secret); err != nil { - return nil, err + secret := &corev1.Secret{} + if err := r.Client.Get(ctx, key, secret); err != nil { + return nil, fmt.Errorf("failed to get secret '%s/%s': %w", namespace, name, err) } - return secret.Data, nil + return secret, nil } // reconcileArtifact archives a new Artifact to the Storage, if the current // (Status) data on the object does not match the given. // // The inspection of the given data to the object is differed, ensuring any -// stale observations like v1beta2.ArtifactOutdatedCondition are removed. +// stale observations like v1.ArtifactOutdatedCondition are removed. 
// If the given Artifact and/or artifactSet (includes) and observed artifact // content config do not differ from the object's current, it returns early. // Source ignore patterns are loaded, and the given directory is archived while @@ -703,14 +831,14 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat fmt.Errorf("failed to stat target artifact path: %w", err), sourcev1.StatOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } else if !f.IsDir() { e := serror.NewGeneric( fmt.Errorf("invalid target path: '%s' is not a directory", dir), sourcev1.InvalidPathReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -720,7 +848,7 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat fmt.Errorf("failed to create artifact directory: %w", err), sourcev1.DirCreationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } unlock, err := r.Storage.Lock(artifact) @@ -746,12 +874,12 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat } // Archive directory to storage - if err := r.Storage.Archive(&artifact, dir, SourceIgnoreFilter(ps, ignoreDomain)); err != nil { + if err := r.Storage.Archive(&artifact, dir, storage.SourceIgnoreFilter(ps, ignoreDomain)); err != nil { e := serror.NewGeneric( fmt.Errorf("unable to archive artifact to storage: %w", err), sourcev1.ArchiveOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, 
e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -761,6 +889,7 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat obj.Status.ObservedIgnore = obj.Spec.Ignore obj.Status.ObservedRecurseSubmodules = obj.Spec.RecurseSubmodules obj.Status.ObservedInclude = obj.Spec.Include + obj.Status.ObservedSparseCheckout = obj.Spec.SparseCheckout // Remove the deprecated symlink. // TODO(hidde): remove 2 minor versions from introduction of v1. @@ -780,15 +909,15 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat } // reconcileInclude reconciles the on the object specified -// v1beta2.GitRepositoryInclude list by copying their Artifact (sub)contents to +// v1.GitRepositoryInclude list by copying their Artifact (sub)contents to // the specified paths in the given directory. // // When one of the includes is unavailable, it marks the object with -// v1beta2.IncludeUnavailableCondition=True and returns early. +// v1.IncludeUnavailableCondition=True and returns early. // When the copy operations are successful, it removes the -// v1beta2.IncludeUnavailableCondition from the object. +// v1.IncludeUnavailableCondition from the object. // When the composed artifactSet differs from the current set in the Status of -// the object, it marks the object with v1beta2.ArtifactOutdatedCondition=True. +// the object, it marks the object with v1.ArtifactOutdatedCondition=True. 
func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.GitRepository, _ *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) { @@ -800,7 +929,7 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, sp *patc fmt.Errorf("path calculation for include '%s' failed: %w", incl.GitRepositoryRef.Name, err), "IllegalPath", ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -808,7 +937,7 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, sp *patc // such that the index of artifactSet matches with the index of Include. // Hence, index is used here to pick the associated artifact from // includes. - var artifact *sourcev1.Artifact + var artifact *meta.Artifact for j, art := range *includes { if i == j { artifact = art @@ -821,7 +950,7 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, sp *patc fmt.Errorf("failed to copy '%s' include from %s to %s: %w", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err), "CopyFailure", ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } } @@ -833,6 +962,7 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, sp *patc // performs a git checkout. func (r *GitRepositoryReconciler) gitCheckout(ctx context.Context, obj *sourcev1.GitRepository, authOpts *git.AuthOptions, proxyOpts *transport.ProxyOptions, dir string, optimized bool) (*git.Commit, error) { + // Configure checkout strategy. 
cloneOpts := repository.CloneConfig{ RecurseSubmodules: obj.Spec.RecurseSubmodules, @@ -845,7 +975,14 @@ func (r *GitRepositoryReconciler) gitCheckout(ctx context.Context, obj *sourcev1 cloneOpts.SemVer = ref.SemVer cloneOpts.RefName = ref.Name } - + if obj.Spec.SparseCheckout != nil { + // Trim any leading "./" in the directory paths since underlying go-git API does not honor them. + sparseCheckoutDirs := make([]string, len(obj.Spec.SparseCheckout)) + for i, path := range obj.Spec.SparseCheckout { + sparseCheckoutDirs[i] = strings.TrimPrefix(path, "./") + } + cloneOpts.SparseCheckoutDirectories = sparseCheckoutDirs + } // Only if the object has an existing artifact in storage, attempt to // short-circuit clone operation. reconcileStorage has already verified // that the artifact exists. @@ -872,7 +1009,7 @@ func (r *GitRepositoryReconciler) gitCheckout(ctx context.Context, obj *sourcev1 fmt.Errorf("failed to create Git client: %w", err), sourcev1.GitOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return nil, e } defer gitReader.Close() @@ -883,7 +1020,7 @@ func (r *GitRepositoryReconciler) gitCheckout(ctx context.Context, obj *sourcev1 fmt.Errorf("failed to checkout and determine revision: %w", err), sourcev1.GitOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return nil, e } @@ -902,7 +1039,7 @@ func (r *GitRepositoryReconciler) fetchIncludes(ctx context.Context, obj *source "NotFound", ) e.RequeueAfter = r.requeueDependency - conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, "%s", e) return nil, e } @@ -913,7 +1050,7 @@ func (r *GitRepositoryReconciler) fetchIncludes(ctx 
context.Context, obj *source "NoArtifact", ) e.RequeueAfter = r.requeueDependency - conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, "%s", e) return nil, e } @@ -929,10 +1066,10 @@ func (r *GitRepositoryReconciler) fetchIncludes(ctx context.Context, obj *source // verifySignature verifies the signature of the given Git commit and/or its referencing tag // depending on the verification mode specified on the object. // If the signature can not be verified or the verification fails, it records -// v1beta2.SourceVerifiedCondition=False and returns. -// When successful, it records v1beta2.SourceVerifiedCondition=True. +// v1.SourceVerifiedCondition=False and returns. +// When successful, it records v1.SourceVerifiedCondition=True. // If no verification mode is specified on the object, the -// v1beta2.SourceVerifiedCondition Condition is removed. +// v1.SourceVerifiedCondition Condition is removed. 
func (r *GitRepositoryReconciler) verifySignature(ctx context.Context, obj *sourcev1.GitRepository, commit git.Commit) (sreconcile.Result, error) { // Check if there is a commit verification is configured and remove any old // observations if there is none @@ -953,7 +1090,7 @@ func (r *GitRepositoryReconciler) verifySignature(ctx context.Context, obj *sour fmt.Errorf("PGP public keys secret error: %w", err), "VerificationError", ) - conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, e.Err.Error()) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -974,7 +1111,7 @@ func (r *GitRepositoryReconciler) verifySignature(ctx context.Context, obj *sour errors.New("cannot verify tag object's signature if a tag reference is not specified"), "InvalidVerificationMode", ) - conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, err.Reason, err.Err.Error()) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, err.Reason, "%s", err) return sreconcile.ResultEmpty, err } if !git.IsSignedTag(*tag) { @@ -985,7 +1122,7 @@ func (r *GitRepositoryReconciler) verifySignature(ctx context.Context, obj *sour fmt.Errorf("cannot verify signature of tag '%s' since it is not signed", commit.ReferencingTag.String()), "InvalidGitObject", ) - conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, err.Reason, err.Err.Error()) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, err.Reason, "%s", err) return sreconcile.ResultEmpty, err } @@ -996,7 +1133,7 @@ func (r *GitRepositoryReconciler) verifySignature(ctx context.Context, obj *sour fmt.Errorf("signature verification of tag '%s' failed: %w", tag.String(), err), "InvalidTagSignature", ) - conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, e.Err.Error()) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e) // Return error in the hope the secret changes return sreconcile.ResultEmpty, e } 
@@ -1012,7 +1149,7 @@ func (r *GitRepositoryReconciler) verifySignature(ctx context.Context, obj *sour fmt.Errorf("signature verification of commit '%s' failed: %w", commit.Hash.String(), err), "InvalidCommitSignature", ) - conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, e.Err.Error()) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e) // Return error in the hope the secret changes return sreconcile.ResultEmpty, e } @@ -1027,8 +1164,8 @@ func (r *GitRepositoryReconciler) verifySignature(ctx context.Context, obj *sour reason := meta.SucceededReason mode := obj.Spec.Verification.GetMode() obj.Status.SourceVerificationMode = &mode - conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, reason, message.String()) - r.eventLogf(ctx, obj, eventv1.EventTypeTrace, reason, message.String()) + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, reason, "%s", message.String()) + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, reason, "%s", message.String()) return sreconcile.ResultSuccess, nil } @@ -1045,6 +1182,10 @@ func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sour // Remove our finalizer from the list controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + // Cleanup caches. 
+ r.TokenCache.DeleteEventsForObject(sourcev1.GitRepositoryKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile) + // Stop reconciliation as the object is being deleted return sreconcile.ResultEmpty, nil } @@ -1078,7 +1219,7 @@ func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourc } if len(delFiles) > 0 { r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) + "garbage collected %d artifacts", len(delFiles)) return nil } } @@ -1118,10 +1259,18 @@ func gitContentConfigChanged(obj *sourcev1.GitRepository, includes *artifactSet) if requiresVerification(obj) { return true } + if len(obj.Spec.SparseCheckout) != len(obj.Status.ObservedSparseCheckout) { + return true + } + for index, dir := range obj.Spec.SparseCheckout { + if dir != obj.Status.ObservedSparseCheckout[index] { + return true + } + } // Convert artifactSet to index addressable artifacts and ensure that it and // the included artifacts include all the include from the spec. - artifacts := []*sourcev1.Artifact(*includes) + artifacts := []*meta.Artifact(*includes) if len(obj.Spec.Include) != len(artifacts) { return true } @@ -1152,6 +1301,19 @@ func gitContentConfigChanged(obj *sourcev1.GitRepository, includes *artifactSet) return false } +// validateSparseCheckoutPaths checks if the sparse checkout paths exist in the cloned repository. +func (r *GitRepositoryReconciler) validateSparseCheckoutPaths(ctx context.Context, obj *sourcev1.GitRepository, dir string) error { + if obj.Spec.SparseCheckout != nil { + for _, path := range obj.Spec.SparseCheckout { + fullPath := filepath.Join(dir, path) + if _, err := os.Lstat(fullPath); err != nil { + return fmt.Errorf("sparse checkout dir '%s' does not exist in repository: %w", path, err) + } + } + } + return nil +} + // Returns true if both GitRepositoryIncludes are equal. 
func gitRepositoryIncludeEqual(a, b sourcev1.GitRepositoryInclude) bool { if a.GitRepositoryRef != b.GitRepositoryRef { diff --git a/internal/controller/gitrepository_controller_fuzz_test.go b/internal/controller/gitrepository_controller_fuzz_test.go index 1751d096e..c9c136820 100644 --- a/internal/controller/gitrepository_controller_fuzz_test.go +++ b/internal/controller/gitrepository_controller_fuzz_test.go @@ -59,6 +59,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" + intstorage "github.com/fluxcd/pkg/artifact/digest" "github.com/fluxcd/pkg/gittestserver" "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/testenv" @@ -77,7 +78,7 @@ var ( cfg *rest.Config testEnv *testenv.Environment - storage *Storage + storage *intstorage.Storage examplePublicKey []byte examplePrivateKey []byte @@ -477,7 +478,7 @@ func startEnvServer(setupReconcilers func(manager.Manager)) *envtest.Environment panic(err) } defer os.RemoveAll(tmpStoragePath) - storage, err = NewStorage(tmpStoragePath, "localhost:5050", time.Minute*1, 2) + storage, err = intstorage.New(tmpStoragePath, "localhost:5050", time.Minute*1, 2) if err != nil { panic(err) } diff --git a/internal/controller/gitrepository_controller_test.go b/internal/controller/gitrepository_controller_test.go index 800c65577..f9f7a591d 100644 --- a/internal/controller/gitrepository_controller_test.go +++ b/internal/controller/gitrepository_controller_test.go @@ -18,6 +18,7 @@ package controller import ( "context" + "encoding/json" "errors" "fmt" "net/http" @@ -33,7 +34,6 @@ import ( "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/storage/memory" . 
"github.com/onsi/gomega" sshtestdata "golang.org/x/crypto/ssh/testdata" @@ -48,7 +48,10 @@ import ( kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" "github.com/fluxcd/pkg/git" + "github.com/fluxcd/pkg/git/github" "github.com/fluxcd/pkg/gittestserver" "github.com/fluxcd/pkg/runtime/conditions" conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" @@ -347,6 +350,8 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { server options secret *corev1.Secret beforeFunc func(obj *sourcev1.GitRepository) + secretFunc func(secret *corev1.Secret, baseURL string) + middlewareFunc gittestserver.HTTPMiddleware want sreconcile.Result wantErr bool assertConditions []metav1.Condition @@ -385,6 +390,63 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), }, }, + { + name: "HTTPS with mutual TLS makes Reconciling=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mtls-certs", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + "tls.crt": clientPublicKey, + "tls.key": clientPrivateKey, + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "mtls-certs"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, 
meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + name: "HTTPS with mutual TLS and invalid private key makes CheckoutFailed=True and returns error", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-mtls-certs", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + "tls.crt": clientPublicKey, + "tls.key": []byte("invalid"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-mtls-certs"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "tls: failed to find any PEM data in key input"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, { name: "HTTPS with CAFile secret makes Reconciling=True", protocol: "https", @@ -469,6 +531,85 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), }, }, + { + name: "mTLS GitHub App without ca.crt makes FetchFailed=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "gh-app-no-ca"}, + Data: map[string][]byte{ + github.KeyAppID: []byte("123"), + github.KeyAppInstallationID: []byte("456"), + github.KeyAppPrivateKey: sshtestdata.PEMBytes["rsa"], + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + 
obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "gh-app-no-ca"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingWithRetryReason, "foo") + }, + secretFunc: func(secret *corev1.Secret, baseURL string) { + secret.Data[github.KeyAppBaseURL] = []byte(baseURL + "/api/v3") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + // should record a FetchFailedCondition due to TLS handshake + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "x509: "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingWithRetryReason, "foo"), + }, + }, + { + name: "mTLS GitHub App with ca.crt makes Reconciling=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + username: github.AccessTokenUsername, + password: "some-enterprise-token", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "gh-app-ca"}, + Data: map[string][]byte{ + github.KeyAppID: []byte("123"), + github.KeyAppInstallationID: []byte("456"), + github.KeyAppPrivateKey: sshtestdata.PEMBytes["rsa"], + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "gh-app-ca"} + }, + secretFunc: func(secret *corev1.Secret, baseURL string) { + secret.Data[github.KeyAppBaseURL] = []byte(baseURL + "/api/v3") + secret.Data["ca.crt"] = tlsCA + }, + middlewareFunc: func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.URL.Path, "/api/v3/app/installations/") { + w.WriteHeader(http.StatusOK) + tok := &github.AppToken{ + Token: "some-enterprise-token", + ExpiresAt: time.Now().Add(time.Hour), + } + _ = json.NewEncoder(w).Encode(tok) + } 
+ handler.ServeHTTP(w, r) + }) + }, + wantErr: false, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, // TODO: Add test case for HTTPS with bearer token auth secret. It // depends on gitkit to have support for bearer token based // authentication. @@ -558,7 +699,7 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} obj.Status = sourcev1.GitRepositoryStatus{ - Artifact: &sourcev1.Artifact{ + Artifact: &meta.Artifact{ Revision: "staging/some-revision", Path: randStringRunes(10), }, @@ -571,6 +712,78 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), }, }, + { + // This test is only for verifying the failure state when using + // provider auth. Protocol http is used for simplicity. 
+ name: "github provider without secret ref makes FetchFailed=True", + protocol: "http", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.InvalidProviderConfigurationReason, "secretRef with github app data must be specified when provider is set to github"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, + { + // This test is only for verifying the failure state when using + // provider auth. Protocol http is used for simplicity. + name: "empty provider with github app data in secret makes FetchFailed=True", + protocol: "http", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-app-secret", + }, + Data: map[string][]byte{ + github.KeyAppID: []byte("1111"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "github-app-secret"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.InvalidProviderConfigurationReason, "secretRef '/github-app-secret' has github app data but provider is not set to github"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, + { + // This test is only for verifying the failure state when 
using + // provider auth. Protocol http is used for simplicity. + name: "github provider without github app data in secret makes FetchFailed=True", + protocol: "http", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-basic-auth", + }, + Data: map[string][]byte{ + "username": []byte("abc"), + "password": []byte("1234"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "github-basic-auth"} + obj.Spec.Provider = sourcev1.GitProviderGitHub + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.InvalidProviderConfigurationReason, "secretRef with github app data must be specified when provider is set to github"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, } for _, tt := range tests { @@ -593,6 +806,10 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { defer os.RemoveAll(server.Root()) server.AutoCreate() + if tt.middlewareFunc != nil { + server.AddHTTPMiddlewares(tt.middlewareFunc) + } + repoPath := "/test.git" localRepo, err := initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) g.Expect(err).NotTo(HaveOccurred()) @@ -637,6 +854,10 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { tt.beforeFunc(obj) } + if tt.secretFunc != nil { + tt.secretFunc(secret, server.HTTPAddress()) + } + clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). 
WithStatusSubresource(&sourcev1.GitRepository{}) @@ -683,6 +904,165 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { } } +func TestGitRepositoryReconciler_getAuthOpts_provider(t *testing.T) { + tests := []struct { + name string + url string + secret *corev1.Secret + beforeFunc func(obj *sourcev1.GitRepository) + wantErr string + }{ + { + name: "azure provider", + url: "https://dev.azure.com/foo/bar/_git/baz", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderAzure + }, + wantErr: "ManagedIdentityCredential", + }, + { + name: "azure provider with service account and feature gate for object-level identity disabled", + url: "https://dev.azure.com/foo/bar/_git/baz", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderAzure + obj.Spec.ServiceAccountName = "azure-sa" + }, + wantErr: auth.FeatureGateObjectLevelWorkloadIdentity, + }, + { + name: "github provider with no secret ref", + url: "https://github.com/org/repo.git", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + }, + wantErr: "secretRef with github app data must be specified when provider is set to github", + }, + { + name: "github provider with github app data in secret", + url: "https://example.com/org/repo", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "githubAppSecret", + }, + Data: map[string][]byte{ + github.KeyAppID: []byte("123"), + github.KeyAppInstallationID: []byte("456"), + github.KeyAppPrivateKey: []byte("abc"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "githubAppSecret", + } + }, + wantErr: "Key must be a PEM encoded PKCS1 or PKCS8 key", + }, + { + name: "generic provider with github app data in secret", + url: "https://example.com/org/repo", + secret: &corev1.Secret{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "githubAppSecret", + }, + Data: map[string][]byte{ + github.KeyAppID: []byte("123"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGeneric + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "githubAppSecret", + } + }, + wantErr: "secretRef '/githubAppSecret' has github app data but provider is not set to github", + }, + { + name: "github provider with basic auth secret", + url: "https://github.com/org/repo.git", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "basic-auth-secret", + }, + Data: map[string][]byte{ + "username": []byte("abc"), + "password": []byte("1234"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "basic-auth-secret", + } + }, + wantErr: "secretRef with github app data must be specified when provider is set to github", + }, + { + name: "generic provider", + url: "https://example.com/org/repo", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGeneric + }, + }, + { + name: "secret ref defined for non existing secret", + url: "https://github.com/org/repo.git", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "authSecret", + } + }, + wantErr: "failed to get secret '/authSecret': secrets \"authSecret\" not found", + }, + { + url: "https://example.com/org/repo", + name: "no provider", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.GitRepository{}) + + if tt.secret != nil { + clientBuilder.WithObjects(tt.secret) + } + + obj := &sourcev1.GitRepository{} + r := &GitRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: clientBuilder.Build(), + features: features.FeatureGates(), + patchOptions: getPatchOptions(gitRepositoryReadyCondition.Owned, "sc"), + } + + url, err := url.Parse(tt.url) + g.Expect(err).ToNot(HaveOccurred()) + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + opts, err := r.getAuthOpts(ctx, obj, *url, nil) + + if tt.wantErr != "" { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(opts).ToNot(BeNil()) + g.Expect(opts.BearerToken).To(BeEmpty()) + g.Expect(opts.Username).To(BeEmpty()) + g.Expect(opts.Password).To(BeEmpty()) + } + }) + } +} + func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) { g := NewWithT(t) @@ -786,7 +1166,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) }, beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) { obj.Status = sourcev1.GitRepositoryStatus{ - Artifact: &sourcev1.Artifact{ + Artifact: &meta.Artifact{ Revision: "staging/some-revision", Path: randStringRunes(10), }, @@ -807,7 +1187,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) { // Add existing artifact on the object and storage. 
obj.Status = sourcev1.GitRepositoryStatus{ - Artifact: &sourcev1.Artifact{ + Artifact: &meta.Artifact{ Revision: "staging@sha1:" + latestRev, Path: randStringRunes(10), }, @@ -830,7 +1210,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) obj.Spec.Ignore = ptr.To("foo") // Add existing artifact on the object and storage. obj.Status = sourcev1.GitRepositoryStatus{ - Artifact: &sourcev1.Artifact{ + Artifact: &meta.Artifact{ Revision: "staging@sha1:" + latestRev, Path: randStringRunes(10), }, @@ -961,7 +1341,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { { name: "Archiving artifact to storage with includes makes ArtifactInStorage=True", dir: "testdata/git/repository", - includes: artifactSet{&sourcev1.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"}}, + includes: artifactSet{&meta.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"}}, beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} obj.Spec.Include = []sourcev1.GitRepositoryInclude{ @@ -981,14 +1361,14 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { { name: "Up-to-date artifact should not update status", dir: "testdata/git/repository", - includes: artifactSet{&sourcev1.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Digest: "some-checksum"}}, + includes: artifactSet{&meta.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Digest: "some-checksum"}}, beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} obj.Spec.Include = []sourcev1.GitRepositoryInclude{ {GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}}, } - obj.Status.Artifact = &sourcev1.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"} - obj.Status.IncludedArtifacts = []*sourcev1.Artifact{{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Digest: 
"some-checksum"}} + obj.Status.Artifact = &meta.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"} + obj.Status.IncludedArtifacts = []*meta.Artifact{{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Digest: "some-checksum"}} obj.Status.ObservedInclude = obj.Spec.Include }, want: sreconcile.ResultSuccess, @@ -1123,6 +1503,8 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { server, err := testserver.NewTempArtifactServer() g.Expect(err).NotTo(HaveOccurred()) + server.Start() + defer server.Stop() storage, err := newTestStorage(server.HTTPServer) g.Expect(err).NotTo(HaveOccurred()) defer os.RemoveAll(storage.BasePath) @@ -1207,7 +1589,7 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { }, } if d.withArtifact { - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: d.name + ".tar.gz", Revision: d.name, LastUpdateTime: metav1.Now(), @@ -1299,20 +1681,20 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *sourcev1.GitRepository, storage *Storage) error + beforeFunc func(obj *sourcev1.GitRepository, storage *storage.Storage) error want sreconcile.Result wantErr bool - assertArtifact *sourcev1.Artifact + assertArtifact *meta.Artifact assertConditions []metav1.Condition assertPaths []string }{ { name: "garbage collects", - beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { revisions := []string{"a", "b", "c", "d"} for n := range revisions { v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), Revision: v, } @@ -1330,7 +1712,7 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { conditions.MarkTrue(obj, 
meta.ReadyCondition, "foo", "bar") return nil }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/d.txt", Revision: "d", Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", @@ -1358,8 +1740,8 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "notices missing artifact in storage", - beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/invalid.txt", Revision: "e", } @@ -1377,10 +1759,10 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "notices empty artifact digest", - beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { f := "empty-digest.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -1408,10 +1790,10 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "notices artifact digest mismatch", - beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { f := "digest-mismatch.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -1439,8 +1821,8 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "updates hostname on diff from current", - beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { + obj.Status.Artifact = 
&meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -1459,7 +1841,7 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { assertPaths: []string{ "/reconcile-storage/hostname.txt", }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -1997,78 +2379,6 @@ func TestGitRepositoryReconciler_verifySignature(t *testing.T) { } } -func TestGitRepositoryReconciler_getProxyOpts(t *testing.T) { - invalidProxy := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "invalid-proxy", - Namespace: "default", - }, - Data: map[string][]byte{ - "url": []byte("https://example.com"), - }, - } - validProxy := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "valid-proxy", - Namespace: "default", - }, - Data: map[string][]byte{ - "address": []byte("https://example.com"), - "username": []byte("user"), - "password": []byte("pass"), - }, - } - - clientBuilder := fakeclient.NewClientBuilder(). - WithScheme(testEnv.GetScheme()). 
- WithObjects(invalidProxy, validProxy) - - r := &GitRepositoryReconciler{ - Client: clientBuilder.Build(), - } - - tests := []struct { - name string - secret string - err string - proxyOpts *transport.ProxyOptions - }{ - { - name: "non-existent secret", - secret: "non-existent", - err: "failed to get proxy secret 'default/non-existent': ", - }, - { - name: "invalid proxy secret", - secret: "invalid-proxy", - err: "invalid proxy secret 'default/invalid-proxy': key 'address' is missing", - }, - { - name: "valid proxy secret", - secret: "valid-proxy", - proxyOpts: &transport.ProxyOptions{ - URL: "https://example.com", - Username: "user", - Password: "pass", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - opts, err := r.getProxyOpts(context.TODO(), tt.secret, "default") - if opts != nil { - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(opts).To(Equal(tt.proxyOpts)) - } else { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.err)) - } - }) - } -} - func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { g := NewWithT(t) @@ -2491,7 +2801,7 @@ func TestGitRepositoryReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} }, commit: concreteCommit, wantEvent: "Normal NewArtifact stored artifact for commit 'test commit'", @@ -2501,12 +2811,12 @@ func TestGitRepositoryReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") 
conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, commit: concreteCommit, @@ -2517,12 +2827,12 @@ func TestGitRepositoryReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Digest: "bbb"} + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, commit: concreteCommit, @@ -2533,11 +2843,11 @@ func TestGitRepositoryReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, }, @@ -2546,12 +2856,12 @@ func TestGitRepositoryReconciler_notify(t *testing.T) { res: sreconcile.ResultEmpty, resErr: noopErr, 
oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, commit: partialCommit, // no-op will always result in partial commit. @@ -2642,7 +2952,7 @@ func TestGitRepositoryReconciler_fetchIncludes(t *testing.T) { {name: "b", toPath: "b/", shouldExist: true}, }, wantErr: false, - wantArtifactSet: []*sourcev1.Artifact{ + wantArtifactSet: []*meta.Artifact{ {Revision: "a"}, {Revision: "b"}, }, @@ -2700,7 +3010,7 @@ func TestGitRepositoryReconciler_fetchIncludes(t *testing.T) { }, } if d.withArtifact { - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: d.name + ".tar.gz", Revision: d.name, LastUpdateTime: metav1.Now(), @@ -2858,7 +3168,7 @@ func TestGitContentConfigChanged(t *testing.T) { tests := []struct { name string obj sourcev1.GitRepository - artifacts []*sourcev1.Artifact + artifacts []*meta.Artifact want bool }{ { @@ -2895,6 +3205,38 @@ func TestGitContentConfigChanged(t *testing.T) { }, want: false, }, + { + name: "unobserved sparse checkout", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{SparseCheckout: []string{"a/b/c", "x/y/z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"a/b/c"}}, + }, + want: true, + }, + { + name: "unobserved case sensitive sparse checkout", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{SparseCheckout: []string{"a/b/c", "x/y/Z"}}, + Status: 
sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"a/b/c", "x/y/z"}}, + }, + want: true, + }, + { + name: "observed sparse checkout", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{SparseCheckout: []string{"a/b/c", "x/y/z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"a/b/c", "x/y/z"}}, + }, + want: false, + }, + { + name: "observed sparse checkout with leading slash", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{SparseCheckout: []string{"./a/b/c", "./x/y/z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"./a/b/c", "./x/y/z"}}, + }, + want: false, + }, { name: "unobserved include", obj: sourcev1.GitRepository{ @@ -2926,10 +3268,10 @@ func TestGitContentConfigChanged(t *testing.T) { ToPath: "baz", }, }, - IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Digest: "bbb"}}, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, }, }, - artifacts: []*sourcev1.Artifact{ + artifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, }, want: false, @@ -2954,10 +3296,10 @@ func TestGitContentConfigChanged(t *testing.T) { ToPath: "baz", }, }, - IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Digest: "bbb"}}, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, }, }, - artifacts: []*sourcev1.Artifact{ + artifacts: []*meta.Artifact{ {Revision: "ccc", Digest: "bbb"}, }, want: true, @@ -2982,10 +3324,10 @@ func TestGitContentConfigChanged(t *testing.T) { ToPath: "baz", }, }, - IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Digest: "bbb"}}, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, }, }, - artifacts: []*sourcev1.Artifact{ + artifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "ddd"}, }, want: true, @@ -3010,10 +3352,10 @@ func TestGitContentConfigChanged(t *testing.T) { ToPath: "baz", }, }, - IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Digest: "bbb"}}, + 
IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, }, }, - artifacts: []*sourcev1.Artifact{ + artifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, }, want: true, @@ -3036,13 +3378,13 @@ func TestGitContentConfigChanged(t *testing.T) { }, }, Status: sourcev1.GitRepositoryStatus{ - IncludedArtifacts: []*sourcev1.Artifact{ + IncludedArtifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, {Revision: "ccc", Digest: "ccc"}, }, }, }, - artifacts: []*sourcev1.Artifact{ + artifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, {Revision: "ccc", Digest: "ddd"}, }, @@ -3078,13 +3420,13 @@ func TestGitContentConfigChanged(t *testing.T) { ToPath: "baz", }, }, - IncludedArtifacts: []*sourcev1.Artifact{ + IncludedArtifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, {Revision: "ccc", Digest: "ccc"}, }, }, }, - artifacts: []*sourcev1.Artifact{ + artifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, }, want: true, @@ -3119,12 +3461,12 @@ func TestGitContentConfigChanged(t *testing.T) { ToPath: "baz", }, }, - IncludedArtifacts: []*sourcev1.Artifact{ + IncludedArtifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, }, }, }, - artifacts: []*sourcev1.Artifact{ + artifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, {Revision: "ccc", Digest: "ccc"}, }, diff --git a/internal/controller/helmchart_controller.go b/internal/controller/helmchart_controller.go index b8d23be53..e969bf67a 100644 --- a/internal/controller/helmchart_controller.go +++ b/internal/controller/helmchart_controller.go @@ -19,6 +19,7 @@ package controller import ( "context" "crypto/tls" + "encoding/json" "errors" "fmt" "net/url" @@ -29,6 +30,7 @@ import ( "time" "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/notaryproject/notation-go/verifier/trustpolicy" "github.com/opencontainers/go-digest" "github.com/sigstore/cosign/v2/pkg/cosign" helmgetter 
"helm.sh/helm/v3/pkg/getter" @@ -41,6 +43,7 @@ import ( "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -48,11 +51,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/ratelimiter" "sigs.k8s.io/controller-runtime/pkg/reconcile" eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" "github.com/fluxcd/pkg/git" "github.com/fluxcd/pkg/runtime/conditions" helper "github.com/fluxcd/pkg/runtime/controller" @@ -63,13 +66,14 @@ import ( "github.com/fluxcd/pkg/tar" sourcev1 "github.com/fluxcd/source-controller/api/v1" - helmv1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/internal/cache" serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/helm/chart" "github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/repository" soci "github.com/fluxcd/source-controller/internal/oci" + scosign "github.com/fluxcd/source-controller/internal/oci/cosign" + "github.com/fluxcd/source-controller/internal/oci/notation" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/internal/reconcile/summarize" "github.com/fluxcd/source-controller/internal/util" @@ -129,7 
+133,7 @@ type HelmChartReconciler struct { helper.Metrics RegistryClientGenerator RegistryClientGeneratorFunc - Storage *Storage + Storage *storage.Storage Getters helmgetter.Providers ControllerName string @@ -151,32 +155,32 @@ func (r *HelmChartReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Man } type HelmChartReconcilerOptions struct { - RateLimiter ratelimiter.RateLimiter + RateLimiter workqueue.TypedRateLimiter[reconcile.Request] } -// helmChartReconcileFunc is the function type for all the v1beta2.HelmChart +// helmChartReconcileFunc is the function type for all the v1.HelmChart // (sub)reconcile functions. The type implementations are grouped and // executed serially to perform the complete reconcile of the object. -type helmChartReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmChart, build *chart.Build) (sreconcile.Result, error) +type helmChartReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, build *chart.Build) (sreconcile.Result, error) func (r *HelmChartReconciler) SetupWithManagerAndOptions(ctx context.Context, mgr ctrl.Manager, opts HelmChartReconcilerOptions) error { r.patchOptions = getPatchOptions(helmChartReadyCondition.Owned, r.ControllerName) - if err := mgr.GetCache().IndexField(ctx, &helmv1.HelmRepository{}, helmv1.HelmRepositoryURLIndexKey, + if err := mgr.GetCache().IndexField(ctx, &sourcev1.HelmRepository{}, sourcev1.HelmRepositoryURLIndexKey, r.indexHelmRepositoryByURL); err != nil { return fmt.Errorf("failed setting index fields: %w", err) } - if err := mgr.GetCache().IndexField(ctx, &helmv1.HelmChart{}, sourcev1.SourceIndexKey, + if err := mgr.GetCache().IndexField(ctx, &sourcev1.HelmChart{}, sourcev1.SourceIndexKey, r.indexHelmChartBySource); err != nil { return fmt.Errorf("failed setting index fields: %w", err) } return ctrl.NewControllerManagedBy(mgr). 
- For(&helmv1.HelmChart{}, builder.WithPredicates( + For(&sourcev1.HelmChart{}, builder.WithPredicates( predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), )). Watches( - &helmv1.HelmRepository{}, + &sourcev1.HelmRepository{}, handler.EnqueueRequestsFromMapFunc(r.requestsForHelmRepositoryChange), builder.WithPredicates(SourceRevisionChangePredicate{}), ). @@ -186,7 +190,7 @@ func (r *HelmChartReconciler) SetupWithManagerAndOptions(ctx context.Context, mg builder.WithPredicates(SourceRevisionChangePredicate{}), ). Watches( - &helmv1.Bucket{}, + &sourcev1.Bucket{}, handler.EnqueueRequestsFromMapFunc(r.requestsForBucketChange), builder.WithPredicates(SourceRevisionChangePredicate{}), ). @@ -201,7 +205,7 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( log := ctrl.LoggerFrom(ctx) // Fetch the HelmChart - obj := &helmv1.HelmChart{} + obj := &sourcev1.HelmChart{} if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } @@ -233,9 +237,7 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( } result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) - // Always record suspend, readiness and duration metrics. - r.Metrics.RecordSuspend(ctx, obj, obj.Spec.Suspend) - r.Metrics.RecordReadiness(ctx, obj) + // Always record duration metrics. r.Metrics.RecordDuration(ctx, obj, start) }() @@ -275,7 +277,7 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( // reconcile iterates through the helmChartReconcileFunc tasks for the // object. It returns early on the first call that returns // reconcile.ResultRequeue, or produces an error. 
-func (r *HelmChartReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmChart, reconcilers []helmChartReconcileFunc) (sreconcile.Result, error) { +func (r *HelmChartReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, reconcilers []helmChartReconcileFunc) (sreconcile.Result, error) { oldObj := obj.DeepCopy() rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress") @@ -328,7 +330,7 @@ func (r *HelmChartReconciler) reconcile(ctx context.Context, sp *patch.SerialPat } // notify emits notification related to the reconciliation. -func (r *HelmChartReconciler) notify(ctx context.Context, oldObj, newObj *helmv1.HelmChart, build *chart.Build, res sreconcile.Result, resErr error) { +func (r *HelmChartReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.HelmChart, build *chart.Build, res sreconcile.Result, resErr error) { // Notify successful reconciliation for new artifact and recovery from any // failure. if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { @@ -364,7 +366,7 @@ func (r *HelmChartReconciler) notify(ctx context.Context, oldObj, newObj *helmv1 // condition is added. // The hostname of any URL in the Status of the object are updated, to ensure // they match the Storage server hostname of current runtime. 
-func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { +func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -402,7 +404,7 @@ func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, sp *patch.Se if artifactMissing { msg += ": disappeared from storage" } - rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, msg) + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) @@ -418,7 +420,7 @@ func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, sp *patch.Se return sreconcile.ResultSuccess, nil } -func (r *HelmChartReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmChart, build *chart.Build) (_ sreconcile.Result, retErr error) { +func (r *HelmChartReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, build *chart.Build) (_ sreconcile.Result, retErr error) { // Remove any failed verification condition. // The reason is that a failing verification should be recalculated. 
if conditions.IsFalse(obj, sourcev1.SourceVerifiedCondition) { @@ -432,7 +434,7 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, sp *patch.Ser fmt.Errorf("failed to get source: %w", err), "SourceUnavailable", ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) // Return Kubernetes client errors, but ignore others which can only be // solved by a change in generation @@ -448,7 +450,7 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, sp *patch.Ser // Assert source has an artifact if s.GetArtifact() == nil || !r.Storage.ArtifactExist(*s.GetArtifact()) { // Set the condition to indicate that the source has no artifact for all types except OCI HelmRepository - if helmRepo, ok := s.(*helmv1.HelmRepository); !ok || helmRepo.Spec.Type != helmv1.HelmRepositoryTypeOCI { + if helmRepo, ok := s.(*sourcev1.HelmRepository); !ok || helmRepo.Spec.Type != sourcev1.HelmRepositoryTypeOCI { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "NoSourceArtifact", "no artifact available for %s source '%s'", obj.Spec.SourceRef.Kind, obj.Spec.SourceRef.Name) r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "NoSourceArtifact", @@ -495,9 +497,9 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, sp *patch.Ser // Perform the build for the chart source type switch typedSource := s.(type) { - case *helmv1.HelmRepository: + case *sourcev1.HelmRepository: return r.buildFromHelmRepository(ctx, obj, typedSource, build) - case *sourcev1.GitRepository, *helmv1.Bucket: + case *sourcev1.GitRepository, *sourcev1.Bucket: return r.buildFromTarballArtifact(ctx, obj, *typedSource.GetArtifact(), build) default: // Ending up here should generally not be possible @@ -507,12 +509,12 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, sp *patch.Ser } // buildFromHelmRepository attempts to pull and/or package a Helm chart with 
-// the specified data from the v1beta2.HelmRepository and v1beta2.HelmChart +// the specified data from the v1.HelmRepository and v1.HelmChart // objects. -// In case of a failure it records v1beta2.FetchFailedCondition on the chart +// In case of a failure it records v1.FetchFailedCondition on the chart // object, and returns early. -func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj *helmv1.HelmChart, - repo *helmv1.HelmRepository, b *chart.Build) (sreconcile.Result, error) { +func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj *sourcev1.HelmChart, + repo *sourcev1.HelmRepository, b *chart.Build) (sreconcile.Result, error) { // Used to login with the repository declared provider ctxTimeout, cancel := context.WithTimeout(ctx, repo.GetTimeout()) defer cancel() @@ -528,7 +530,7 @@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj * err, sourcev1.AuthenticationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } if certsTmpDir != "" { @@ -545,7 +547,7 @@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj * // Initialize the chart repository var chartRepo repository.Downloader switch repo.Spec.Type { - case helmv1.HelmRepositoryTypeOCI: + case sourcev1.HelmRepositoryTypeOCI: if !helmreg.IsOCI(normalizedURL) { err := fmt.Errorf("invalid OCI registry URL: %s", normalizedURL) return chartRepoConfigErrorReturn(err, obj) @@ -561,7 +563,7 @@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj * fmt.Errorf("failed to construct Helm client: %w", err), meta.FailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -579,14 +581,14 
@@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj * provider := obj.Spec.Verify.Provider verifiers, err = r.makeVerifiers(ctx, obj, *clientOpts) if err != nil { - if obj.Spec.Verify.SecretRef == nil { + if obj.Spec.Verify.SecretRef == nil && obj.Spec.Verify.Provider == "cosign" { provider = fmt.Sprintf("%s keyless", provider) } e := serror.NewGeneric( fmt.Errorf("failed to verify the signature using provider '%s': %w", provider, err), sourcev1.VerificationError, ) - conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, e.Err.Error()) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } } @@ -617,7 +619,7 @@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj * fmt.Errorf("failed to login to OCI registry: %w", err), sourcev1.AuthenticationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } } @@ -660,8 +662,9 @@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj * // Construct the chart builder with scoped configuration cb := chart.NewRemoteBuilder(chartRepo) opts := chart.BuildOptions{ - ValuesFiles: obj.GetValuesFiles(), - Force: obj.Generation != obj.Status.ObservedGeneration, + ValuesFiles: obj.GetValuesFiles(), + IgnoreMissingValuesFiles: obj.Spec.IgnoreMissingValuesFiles, + Force: obj.Generation != obj.Status.ObservedGeneration, // The remote builder will not attempt to download the chart if // an artifact exists with the same name and version and `Force` is false. // It will however try to verify the chart if `obj.Spec.Verify` is set, at every reconciliation. 
@@ -669,6 +672,7 @@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj * } if artifact := obj.GetArtifact(); artifact != nil { opts.CachedChart = r.Storage.LocalPath(*artifact) + opts.CachedChartValuesFiles = obj.Status.ObservedValuesFiles } // Set the VersionMetadata to the object's Generation if ValuesFiles is defined @@ -689,11 +693,11 @@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj * } // buildFromTarballArtifact attempts to pull and/or package a Helm chart with -// the specified data from the v1beta2.HelmChart object and the given -// v1beta2.Artifact. -// In case of a failure it records v1beta2.FetchFailedCondition on the chart +// the specified data from the v1.HelmChart object and the given +// v1.Artifact. +// In case of a failure it records v1.FetchFailedCondition on the chart // object, and returns early. -func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj *helmv1.HelmChart, source sourcev1.Artifact, b *chart.Build) (sreconcile.Result, error) { +func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj *sourcev1.HelmChart, source meta.Artifact, b *chart.Build) (sreconcile.Result, error) { // Create temporary working directory tmpDir, err := util.TempDirForObj("", obj) if err != nil { @@ -701,7 +705,7 @@ func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj fmt.Errorf("failed to create temporary working directory: %w", err), sourcev1.DirCreationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } defer os.RemoveAll(tmpDir) @@ -713,7 +717,7 @@ func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj fmt.Errorf("failed to create directory to untar source into: %w", err), sourcev1.DirCreationFailedReason, ) - conditions.MarkTrue(obj, 
sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -724,7 +728,7 @@ func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj fmt.Errorf("failed to open source artifact: %w", err), sourcev1.ReadOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } if err = tar.Untar(f, sourceDir, tar.WithMaxUntarSize(-1)); err != nil { @@ -755,25 +759,27 @@ func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj // Configure builder options, including any previously cached chart opts := chart.BuildOptions{ - ValuesFiles: obj.GetValuesFiles(), - Force: obj.Generation != obj.Status.ObservedGeneration, + ValuesFiles: obj.GetValuesFiles(), + IgnoreMissingValuesFiles: obj.Spec.IgnoreMissingValuesFiles, + Force: obj.Generation != obj.Status.ObservedGeneration, } - if artifact := obj.Status.Artifact; artifact != nil { + if artifact := obj.GetArtifact(); artifact != nil { opts.CachedChart = r.Storage.LocalPath(*artifact) + opts.CachedChartValuesFiles = obj.Status.ObservedValuesFiles } // Configure revision metadata for chart build if we should react to revision changes - if obj.Spec.ReconcileStrategy == helmv1.ReconcileStrategyRevision { + if obj.Spec.ReconcileStrategy == sourcev1.ReconcileStrategyRevision { rev := source.Revision if obj.Spec.SourceRef.Kind == sourcev1.GitRepositoryKind { rev = git.ExtractHashFromRevision(rev).String() } - if obj.Spec.SourceRef.Kind == helmv1.BucketKind { + if obj.Spec.SourceRef.Kind == sourcev1.BucketKind { if dig := digest.Digest(rev); dig.Validate() == nil { rev = dig.Encoded() } } - if kind := obj.Spec.SourceRef.Kind; kind == sourcev1.GitRepositoryKind || kind == helmv1.BucketKind { + if kind := obj.Spec.SourceRef.Kind; kind 
== sourcev1.GitRepositoryKind || kind == sourcev1.BucketKind { // The SemVer from the metadata is at times used in e.g. the label metadata for a resource // in a chart, which has a limited length of 63 characters. // To not fill most of this space with a full length SHA hex (40 characters for SHA-1, and @@ -815,12 +821,12 @@ func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj // (Status) data on the object does not match the given. // // The inspection of the given data to the object is differed, ensuring any -// stale observations like v1beta2.ArtifactOutdatedCondition are removed. +// stale observations like v1.ArtifactOutdatedCondition are removed. // If the given Artifact does not differ from the object's current, it returns // early. // On a successful archive, the Artifact in the Status of the object is set, // and the symlink in the Storage is updated to its path. -func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, _ *patch.SerialPatcher, obj *helmv1.HelmChart, b *chart.Build) (sreconcile.Result, error) { +func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, _ *patch.SerialPatcher, obj *sourcev1.HelmChart, b *chart.Build) (sreconcile.Result, error) { // Without a complete chart build, there is little to reconcile if !b.Complete() { return sreconcile.ResultRequeue, nil @@ -830,7 +836,7 @@ func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, _ *patch.Se defer func() { if obj.Status.ObservedChartName == b.Name && obj.GetArtifact().HasRevision(b.Version) { conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) - conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, reasonForBuild(b), b.Summary()) + conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, reasonForBuild(b), "%s", b.Summary()) } }() @@ -852,7 +858,7 @@ func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, _ *patch.Se fmt.Errorf("failed to create artifact directory: %w", err), 
sourcev1.DirCreationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } unlock, err := r.Storage.Lock(artifact) @@ -861,7 +867,7 @@ func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, _ *patch.Se fmt.Errorf("failed to acquire lock for artifact: %w", err), sourcev1.AcquireLockFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } defer unlock() @@ -872,13 +878,18 @@ func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, _ *patch.Se fmt.Errorf("unable to copy Helm chart to storage: %w", err), sourcev1.ArchiveOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } // Record it on the object obj.Status.Artifact = artifact.DeepCopy() obj.Status.ObservedChartName = b.Name + if obj.Spec.IgnoreMissingValuesFiles { + obj.Status.ObservedValuesFiles = b.ValuesFiles + } else { + obj.Status.ObservedValuesFiles = nil + } // Update symlink on a "best effort" basis symURL, err := r.Storage.Symlink(artifact, "latest.tar.gz") @@ -895,15 +906,15 @@ func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, _ *patch.Se // getSource returns the v1beta1.Source for the given object, or an error describing why the source could not be // returned. 
-func (r *HelmChartReconciler) getSource(ctx context.Context, obj *helmv1.HelmChart) (sourcev1.Source, error) { +func (r *HelmChartReconciler) getSource(ctx context.Context, obj *sourcev1.HelmChart) (sourcev1.Source, error) { namespacedName := types.NamespacedName{ Namespace: obj.GetNamespace(), Name: obj.Spec.SourceRef.Name, } var s sourcev1.Source switch obj.Spec.SourceRef.Kind { - case helmv1.HelmRepositoryKind: - var repo helmv1.HelmRepository + case sourcev1.HelmRepositoryKind: + var repo sourcev1.HelmRepository if err := r.Client.Get(ctx, namespacedName, &repo); err != nil { return nil, err } @@ -914,15 +925,15 @@ func (r *HelmChartReconciler) getSource(ctx context.Context, obj *helmv1.HelmCha return nil, err } s = &repo - case helmv1.BucketKind: - var bucket helmv1.Bucket + case sourcev1.BucketKind: + var bucket sourcev1.Bucket if err := r.Client.Get(ctx, namespacedName, &bucket); err != nil { return nil, err } s = &bucket default: return nil, fmt.Errorf("unsupported source kind '%s', must be one of: %v", obj.Spec.SourceRef.Kind, []string{ - helmv1.HelmRepositoryKind, sourcev1.GitRepositoryKind, helmv1.BucketKind}) + sourcev1.HelmRepositoryKind, sourcev1.GitRepositoryKind, sourcev1.BucketKind}) } return s, nil } @@ -930,7 +941,7 @@ func (r *HelmChartReconciler) getSource(ctx context.Context, obj *helmv1.HelmCha // reconcileDelete handles the deletion of the object. // It first garbage collects all Artifacts for the object from the Storage. // Removing the finalizer from the object if successful. 
-func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, obj *helmv1.HelmChart) (sreconcile.Result, error) { +func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmChart) (sreconcile.Result, error) { // Garbage collect the resource's artifacts if err := r.garbageCollect(ctx, obj); err != nil { // Return the error so we retry the failed garbage collection @@ -949,7 +960,7 @@ func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, obj *helmv1.H // It removes all but the current Artifact from the Storage, unless the // deletion timestamp on the object is set. Which will result in the // removal of all Artifacts for the objects. -func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *helmv1.HelmChart) error { +func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmChart) error { if !obj.DeletionTimestamp.IsZero() { if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { return serror.NewGeneric( @@ -973,7 +984,7 @@ func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *helmv1.He } if len(delFiles) > 0 { r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) + "garbage collected %d artifacts", len(delFiles)) return nil } } @@ -996,8 +1007,8 @@ func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Cont if apierrs.ReasonForError(err) != metav1.StatusReasonUnknown { return nil, err } - obj = &helmv1.HelmRepository{ - Spec: helmv1.HelmRepositorySpec{ + obj = &sourcev1.HelmRepository{ + Spec: sourcev1.HelmRepositorySpec{ URL: url, Timeout: &metav1.Duration{Duration: 60 * time.Second}, }, @@ -1085,13 +1096,13 @@ func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Cont } } -func (r *HelmChartReconciler) resolveDependencyRepository(ctx context.Context, url string, namespace 
string) (*helmv1.HelmRepository, error) { +func (r *HelmChartReconciler) resolveDependencyRepository(ctx context.Context, url string, namespace string) (*sourcev1.HelmRepository, error) { listOpts := []client.ListOption{ client.InNamespace(namespace), - client.MatchingFields{helmv1.HelmRepositoryURLIndexKey: url}, + client.MatchingFields{sourcev1.HelmRepositoryURLIndexKey: url}, client.Limit(1), } - var list helmv1.HelmRepositoryList + var list sourcev1.HelmRepositoryList err := r.Client.List(ctx, &list, listOpts...) if err != nil { return nil, fmt.Errorf("unable to retrieve HelmRepositoryList: %w", err) @@ -1103,7 +1114,7 @@ func (r *HelmChartReconciler) resolveDependencyRepository(ctx context.Context, u } func (r *HelmChartReconciler) indexHelmRepositoryByURL(o client.Object) []string { - repo, ok := o.(*helmv1.HelmRepository) + repo, ok := o.(*sourcev1.HelmRepository) if !ok { panic(fmt.Sprintf("Expected a HelmRepository, got %T", o)) } @@ -1115,7 +1126,7 @@ func (r *HelmChartReconciler) indexHelmRepositoryByURL(o client.Object) []string } func (r *HelmChartReconciler) indexHelmChartBySource(o client.Object) []string { - hc, ok := o.(*helmv1.HelmChart) + hc, ok := o.(*sourcev1.HelmChart) if !ok { panic(fmt.Sprintf("Expected a HelmChart, got %T", o)) } @@ -1123,7 +1134,7 @@ func (r *HelmChartReconciler) indexHelmChartBySource(o client.Object) []string { } func (r *HelmChartReconciler) requestsForHelmRepositoryChange(ctx context.Context, o client.Object) []reconcile.Request { - repo, ok := o.(*helmv1.HelmRepository) + repo, ok := o.(*sourcev1.HelmRepository) if !ok { ctrl.LoggerFrom(ctx).Error(fmt.Errorf("expected a HelmRepository, got %T", o), "failed to get requests for HelmRepository change") return nil @@ -1134,9 +1145,9 @@ func (r *HelmChartReconciler) requestsForHelmRepositoryChange(ctx context.Contex return nil } - var list helmv1.HelmChartList + var list sourcev1.HelmChartList if err := r.List(ctx, &list, client.MatchingFields{ - sourcev1.SourceIndexKey: 
fmt.Sprintf("%s/%s", helmv1.HelmRepositoryKind, repo.Name), + sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.HelmRepositoryKind, repo.Name), }); err != nil { ctrl.LoggerFrom(ctx).Error(err, "failed to list HelmCharts for HelmRepository change") return nil @@ -1164,7 +1175,7 @@ func (r *HelmChartReconciler) requestsForGitRepositoryChange(ctx context.Context return nil } - var list helmv1.HelmChartList + var list sourcev1.HelmChartList if err := r.List(ctx, &list, client.MatchingFields{ sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.GitRepositoryKind, repo.Name), }); err != nil { @@ -1182,7 +1193,7 @@ func (r *HelmChartReconciler) requestsForGitRepositoryChange(ctx context.Context } func (r *HelmChartReconciler) requestsForBucketChange(ctx context.Context, o client.Object) []reconcile.Request { - bucket, ok := o.(*helmv1.Bucket) + bucket, ok := o.(*sourcev1.Bucket) if !ok { ctrl.LoggerFrom(ctx).Error(fmt.Errorf("expected a Bucket, got %T", o), "failed to get reconcile requests for Bucket change") @@ -1194,9 +1205,9 @@ func (r *HelmChartReconciler) requestsForBucketChange(ctx context.Context, o cli return nil } - var list helmv1.HelmChartList + var list sourcev1.HelmChartList if err := r.List(ctx, &list, client.MatchingFields{ - sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", helmv1.BucketKind, bucket.Name), + sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.BucketKind, bucket.Name), }); err != nil { ctrl.LoggerFrom(ctx).Error(err, "failed to list HelmCharts for Bucket change") return nil @@ -1228,11 +1239,11 @@ func (r *HelmChartReconciler) eventLogf(ctx context.Context, obj runtime.Object, } // observeChartBuild records the observation on the given given build and error on the object. 
-func observeChartBuild(ctx context.Context, sp *patch.SerialPatcher, pOpts []patch.Option, obj *helmv1.HelmChart, build *chart.Build, err error) { +func observeChartBuild(ctx context.Context, sp *patch.SerialPatcher, pOpts []patch.Option, obj *sourcev1.HelmChart, build *chart.Build, err error) { if build.HasMetadata() { if build.Name != obj.Status.ObservedChartName || !obj.GetArtifact().HasRevision(build.Version) { if obj.GetArtifact() != nil { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewChart", build.Summary()) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewChart", "%s", build.Summary()) } rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", build.Summary()) if err := sp.Patch(ctx, obj, pOpts...); err != nil { @@ -1244,7 +1255,9 @@ func observeChartBuild(ctx context.Context, sp *patch.SerialPatcher, pOpts []pat if build.Complete() { conditions.Delete(obj, sourcev1.FetchFailedCondition) conditions.Delete(obj, sourcev1.BuildFailedCondition) - conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, fmt.Sprintf("verified signature of version %s", build.Version)) + if build.VerifiedResult == soci.VerificationResultSuccess { + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version %s", build.Version) + } } if obj.Spec.Verify == nil { @@ -1263,14 +1276,14 @@ func observeChartBuild(ctx context.Context, sp *patch.SerialPatcher, pOpts []pat switch buildErr.Reason { case chart.ErrChartMetadataPatch, chart.ErrValuesFilesMerge, chart.ErrDependencyBuild, chart.ErrChartPackage: conditions.Delete(obj, sourcev1.FetchFailedCondition) - conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, buildErr.Reason.Reason, buildErr.Error()) + conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, buildErr.Reason.Reason, "%s", buildErr) case chart.ErrChartVerification: conditions.Delete(obj, sourcev1.FetchFailedCondition) - 
conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, buildErr.Reason.Reason, buildErr.Error()) - conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, buildErr.Error()) + conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, buildErr.Reason.Reason, "%s", buildErr) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "%s", buildErr) default: conditions.Delete(obj, sourcev1.BuildFailedCondition) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, buildErr.Reason.Reason, buildErr.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, buildErr.Reason.Reason, "%s", buildErr) } return } @@ -1281,32 +1294,32 @@ func reasonForBuild(build *chart.Build) string { return "" } if build.Packaged { - return helmv1.ChartPackageSucceededReason + return sourcev1.ChartPackageSucceededReason } - return helmv1.ChartPullSucceededReason + return sourcev1.ChartPullSucceededReason } -func chartRepoConfigErrorReturn(err error, obj *helmv1.HelmChart) (sreconcile.Result, error) { +func chartRepoConfigErrorReturn(err error, obj *sourcev1.HelmChart) (sreconcile.Result, error) { switch err.(type) { case *url.Error: e := serror.NewStalling( fmt.Errorf("invalid Helm repository URL: %w", err), sourcev1.URLInvalidReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e default: e := serror.NewStalling( fmt.Errorf("failed to construct Helm client: %w", err), meta.FailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } } // makeVerifiers returns a list of verifiers for the given chart. 
-func (r *HelmChartReconciler) makeVerifiers(ctx context.Context, obj *helmv1.HelmChart, clientOpts getter.ClientOpts) ([]soci.Verifier, error) { +func (r *HelmChartReconciler) makeVerifiers(ctx context.Context, obj *sourcev1.HelmChart, clientOpts getter.ClientOpts) ([]soci.Verifier, error) { var verifiers []soci.Verifier verifyOpts := []remote.Option{} @@ -1318,26 +1331,27 @@ func (r *HelmChartReconciler) makeVerifiers(ctx context.Context, obj *helmv1.Hel switch obj.Spec.Verify.Provider { case "cosign": - defaultCosignOciOpts := []soci.Options{ - soci.WithRemoteOptions(verifyOpts...), + defaultCosignOciOpts := []scosign.Options{ + scosign.WithRemoteOptions(verifyOpts...), } // get the public keys from the given secret if secretRef := obj.Spec.Verify.SecretRef; secretRef != nil { - certSecretName := types.NamespacedName{ + + verifySecret := types.NamespacedName{ Namespace: obj.Namespace, Name: secretRef.Name, } - var pubSecret corev1.Secret - if err := r.Get(ctx, certSecretName, &pubSecret); err != nil { + pubSecret, err := r.retrieveSecret(ctx, verifySecret) + if err != nil { return nil, err } for k, data := range pubSecret.Data { // search for public keys in the secret if strings.HasSuffix(k, ".pub") { - verifier, err := soci.NewCosignVerifier(ctx, append(defaultCosignOciOpts, soci.WithPublicKey(data))...) + verifier, err := scosign.NewCosignVerifier(ctx, append(defaultCosignOciOpts, scosign.WithPublicKey(data))...) 
if err != nil { return nil, err } @@ -1346,7 +1360,7 @@ func (r *HelmChartReconciler) makeVerifiers(ctx context.Context, obj *helmv1.Hel } if len(verifiers) == 0 { - return nil, fmt.Errorf("no public keys found in secret '%s'", certSecretName) + return nil, fmt.Errorf("no public keys found in secret '%s'", verifySecret.String()) } return verifiers, nil } @@ -1359,9 +1373,67 @@ func (r *HelmChartReconciler) makeVerifiers(ctx context.Context, obj *helmv1.Hel SubjectRegExp: match.Subject, }) } - defaultCosignOciOpts = append(defaultCosignOciOpts, soci.WithIdentities(identities)) + defaultCosignOciOpts = append(defaultCosignOciOpts, scosign.WithIdentities(identities)) - verifier, err := soci.NewCosignVerifier(ctx, defaultCosignOciOpts...) + verifier, err := scosign.NewCosignVerifier(ctx, defaultCosignOciOpts...) + if err != nil { + return nil, err + } + verifiers = append(verifiers, verifier) + return verifiers, nil + case "notation": + // get the public keys from the given secret + secretRef := obj.Spec.Verify.SecretRef + + if secretRef == nil { + return nil, fmt.Errorf("verification secret cannot be empty: '%s'", obj.Name) + } + + verifySecret := types.NamespacedName{ + Namespace: obj.Namespace, + Name: secretRef.Name, + } + + pubSecret, err := r.retrieveSecret(ctx, verifySecret) + if err != nil { + return nil, err + } + + data, ok := pubSecret.Data[notation.DefaultTrustPolicyKey] + if !ok { + return nil, fmt.Errorf("'%s' not found in secret '%s'", notation.DefaultTrustPolicyKey, verifySecret.String()) + } + + var doc trustpolicy.Document + + if err := json.Unmarshal(data, &doc); err != nil { + return nil, fmt.Errorf("error occurred while parsing %s: %w", notation.DefaultTrustPolicyKey, err) + } + + var certs [][]byte + + for k, data := range pubSecret.Data { + if strings.HasSuffix(k, ".crt") || strings.HasSuffix(k, ".pem") { + certs = append(certs, data) + } + } + + if certs == nil { + return nil, fmt.Errorf("no certificates found in secret '%s'", 
verifySecret.String()) + } + + trustPolicy := notation.CleanTrustPolicy(&doc, ctrl.LoggerFrom(ctx)) + defaultNotationOciOpts := []notation.Options{ + notation.WithTrustPolicy(trustPolicy), + notation.WithRemoteOptions(verifyOpts...), + notation.WithAuth(clientOpts.Authenticator), + notation.WithKeychain(clientOpts.Keychain), + notation.WithInsecureRegistry(clientOpts.Insecure), + notation.WithLogger(ctrl.LoggerFrom(ctx)), + notation.WithRootCertificates(certs), + } + + verifier, err := notation.NewNotationVerifier(defaultNotationOciOpts...) if err != nil { return nil, err } @@ -1371,3 +1443,15 @@ func (r *HelmChartReconciler) makeVerifiers(ctx context.Context, obj *helmv1.Hel return nil, fmt.Errorf("unsupported verification provider: %s", obj.Spec.Verify.Provider) } } + +// retrieveSecret retrieves a secret from the specified namespace with the given secret name. +// It returns the retrieved secret and any error encountered during the retrieval process. +func (r *HelmChartReconciler) retrieveSecret(ctx context.Context, verifySecret types.NamespacedName) (corev1.Secret, error) { + + var pubSecret corev1.Secret + + if err := r.Get(ctx, verifySecret, &pubSecret); err != nil { + return corev1.Secret{}, err + } + return pubSecret, nil +} diff --git a/internal/controller/helmchart_controller_test.go b/internal/controller/helmchart_controller_test.go index c7c753b98..190a9f8b5 100644 --- a/internal/controller/helmchart_controller_test.go +++ b/internal/controller/helmchart_controller_test.go @@ -19,11 +19,12 @@ package controller import ( "bytes" "context" + "crypto/x509" "encoding/base64" + "encoding/json" "errors" "fmt" "io" - "net" "net/http" "os" "path" @@ -33,7 +34,14 @@ import ( "testing" "time" - "github.com/foxcpp/go-mockdns" + "github.com/fluxcd/pkg/artifact/config" + "github.com/fluxcd/pkg/artifact/digest" + "github.com/notaryproject/notation-core-go/signature/cose" + 
"github.com/notaryproject/notation-core-go/testhelper" + "github.com/notaryproject/notation-go" + nr "github.com/notaryproject/notation-go/registry" + "github.com/notaryproject/notation-go/signer" + "github.com/notaryproject/notation-go/verifier/trustpolicy" . "github.com/onsi/gomega" coptions "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" "github.com/sigstore/cosign/v2/cmd/cosign/cli/sign" @@ -44,7 +52,10 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" + oras "oras.land/oras-go/v2/registry/remote" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -52,6 +63,7 @@ import ( kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" "github.com/fluxcd/pkg/helmtestserver" "github.com/fluxcd/pkg/runtime/conditions" conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" @@ -60,12 +72,12 @@ import ( "github.com/fluxcd/pkg/testserver" sourcev1 "github.com/fluxcd/source-controller/api/v1" - helmv1 "github.com/fluxcd/source-controller/api/v1beta2" serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/helm/chart" "github.com/fluxcd/source-controller/internal/helm/chart/secureloader" "github.com/fluxcd/source-controller/internal/helm/registry" "github.com/fluxcd/source-controller/internal/oci" + snotation 
"github.com/fluxcd/source-controller/internal/oci/notation" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/internal/reconcile/summarize" ) @@ -82,13 +94,13 @@ func TestHelmChartReconciler_deleteBeforeFinalizer(t *testing.T) { g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) }) - helmchart := &helmv1.HelmChart{} + helmchart := &sourcev1.HelmChart{} helmchart.Name = "test-helmchart" helmchart.Namespace = namespaceName - helmchart.Spec = helmv1.HelmChartSpec{ + helmchart.Spec = sourcev1.HelmChartSpec{ Interval: metav1.Duration{Duration: interval}, Chart: "foo", - SourceRef: helmv1.LocalHelmChartSourceReference{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ Kind: "HelmRepository", Name: "bar", }, @@ -127,12 +139,12 @@ func TestHelmChartReconciler_Reconcile(t *testing.T) { tests := []struct { name string - beforeFunc func(repository *helmv1.HelmRepository) - assertFunc func(g *WithT, obj *helmv1.HelmChart, repository *helmv1.HelmRepository) + beforeFunc func(repository *sourcev1.HelmRepository) + assertFunc func(g *WithT, obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) }{ { name: "Reconciles chart build", - assertFunc: func(g *WithT, obj *helmv1.HelmChart, repository *helmv1.HelmRepository) { + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { origObj := obj.DeepCopy() key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} @@ -198,10 +210,10 @@ func TestHelmChartReconciler_Reconcile(t *testing.T) { }, { name: "Stalling on invalid repository URL", - beforeFunc: func(repository *helmv1.HelmRepository) { + beforeFunc: func(repository *sourcev1.HelmRepository) { repository.Spec.URL = "https://unsupported/foo://" // Invalid URL }, - assertFunc: func(g *WithT, obj *helmv1.HelmChart, _ *helmv1.HelmRepository) { + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) 
{ key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} // Wait for HelmChart to be FetchFailed == true g.Eventually(func() bool { @@ -233,10 +245,10 @@ func TestHelmChartReconciler_Reconcile(t *testing.T) { }, { name: "Stalling on invalid oci repository URL", - beforeFunc: func(repository *helmv1.HelmRepository) { + beforeFunc: func(repository *sourcev1.HelmRepository) { repository.Spec.URL = strings.Replace(repository.Spec.URL, "http", "oci", 1) }, - assertFunc: func(g *WithT, obj *helmv1.HelmChart, _ *helmv1.HelmRepository) { + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} // Wait for HelmChart to be Ready g.Eventually(func() bool { @@ -280,12 +292,12 @@ func TestHelmChartReconciler_Reconcile(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - repository := helmv1.HelmRepository{ + repository := sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "helmrepository-", Namespace: ns.Name, }, - Spec: helmv1.HelmRepositorySpec{ + Spec: sourcev1.HelmRepositorySpec{ URL: server.URL(), }, } @@ -297,16 +309,16 @@ func TestHelmChartReconciler_Reconcile(t *testing.T) { g.Expect(testEnv.CreateAndWait(ctx, &repository)).To(Succeed()) defer func() { g.Expect(testEnv.Delete(ctx, &repository)).To(Succeed()) }() - obj := helmv1.HelmChart{ + obj := sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "helmrepository-reconcile-", Namespace: ns.Name, }, - Spec: helmv1.HelmChartSpec{ + Spec: sourcev1.HelmChartSpec{ Chart: chartName, Version: chartVersion, - SourceRef: helmv1.LocalHelmChartSourceReference{ - Kind: helmv1.HelmRepositoryKind, + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, Name: repository.Name, }, }, @@ -323,20 +335,20 @@ func TestHelmChartReconciler_Reconcile(t *testing.T) { func 
TestHelmChartReconciler_reconcileStorage(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *helmv1.HelmChart, storage *Storage) error + beforeFunc func(obj *sourcev1.HelmChart, storage *storage.Storage) error want sreconcile.Result wantErr bool - assertArtifact *sourcev1.Artifact + assertArtifact *meta.Artifact assertConditions []metav1.Condition assertPaths []string }{ { name: "garbage collects", - beforeFunc: func(obj *helmv1.HelmChart, storage *Storage) error { + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { revisions := []string{"a", "b", "c", "d"} for n := range revisions { v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), Revision: v, } @@ -354,7 +366,7 @@ func TestHelmChartReconciler_reconcileStorage(t *testing.T) { conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") return nil }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/d.txt", Revision: "d", Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", @@ -382,8 +394,8 @@ func TestHelmChartReconciler_reconcileStorage(t *testing.T) { }, { name: "notices missing artifact in storage", - beforeFunc: func(obj *helmv1.HelmChart, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/invalid.txt", Revision: "d", } @@ -401,10 +413,10 @@ func TestHelmChartReconciler_reconcileStorage(t *testing.T) { }, { name: "notices empty artifact digest", - beforeFunc: func(obj *helmv1.HelmChart, storage *Storage) error { + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { f := "empty-digest.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: 
fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -432,10 +444,10 @@ func TestHelmChartReconciler_reconcileStorage(t *testing.T) { }, { name: "notices artifact digest mismatch", - beforeFunc: func(obj *helmv1.HelmChart, storage *Storage) error { + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { f := "digest-mismatch.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -463,8 +475,8 @@ func TestHelmChartReconciler_reconcileStorage(t *testing.T) { }, { name: "updates hostname on diff from current", - beforeFunc: func(obj *helmv1.HelmChart, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -483,7 +495,7 @@ func TestHelmChartReconciler_reconcileStorage(t *testing.T) { assertPaths: []string{ "/reconcile-storage/hostname.txt", }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -506,14 +518,14 @@ func TestHelmChartReconciler_reconcileStorage(t *testing.T) { r := &HelmChartReconciler{ Client: fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&helmv1.HelmChart{}). + WithStatusSubresource(&sourcev1.HelmChart{}). 
Build(), EventRecorder: record.NewFakeRecorder(32), Storage: testStorage, patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } - obj := &helmv1.HelmChart{ + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-", Generation: 1, @@ -561,22 +573,30 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { tmpDir := t.TempDir() - storage, err := NewStorage(tmpDir, "example.com", retentionTTL, retentionRecords) + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: "example.com", + StorageAdvAddress: "example.com", + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) g.Expect(err).ToNot(HaveOccurred()) - gitArtifact := &sourcev1.Artifact{ + gitArtifact := &meta.Artifact{ Revision: "mock-ref/abcdefg12345678", Path: "mock.tgz", } - g.Expect(storage.Archive(gitArtifact, "testdata/charts", nil)).To(Succeed()) + g.Expect(st.Archive(gitArtifact, "testdata/charts", nil)).To(Succeed()) tests := []struct { name string source sourcev1.Source - beforeFunc func(obj *helmv1.HelmChart) + beforeFunc func(obj *sourcev1.HelmChart) want sreconcile.Result wantErr error - assertFunc func(g *WithT, build chart.Build, obj helmv1.HelmChart) + assertFunc func(g *WithT, build chart.Build, obj sourcev1.HelmChart) cleanFunc func(g *WithT, build *chart.Build) }{ { @@ -590,15 +610,15 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { Artifact: gitArtifact, }, }, - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" - obj.Spec.SourceRef = helmv1.LocalHelmChartSourceReference{ + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ Name: "gitrepository", Kind: sourcev1.GitRepositoryKind, } }, want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, build chart.Build, obj helmv1.HelmChart) { + assertFunc: func(g 
*WithT, build chart.Build, obj sourcev1.HelmChart) { g.Expect(build.Complete()).To(BeTrue()) g.Expect(build.Name).To(Equal("helmchart")) g.Expect(build.Version).To(Equal("0.1.0")) @@ -625,19 +645,19 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { Artifact: gitArtifact, }, }, - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" - obj.Spec.SourceRef = helmv1.LocalHelmChartSourceReference{ + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ Name: "gitrepository", Kind: sourcev1.GitRepositoryKind, } - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: "some-path", Revision: "some-rev", } }, want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, build chart.Build, obj helmv1.HelmChart) { + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { g.Expect(build.Complete()).To(BeTrue()) g.Expect(build.Name).To(Equal("helmchart")) g.Expect(build.Version).To(Equal("0.1.0")) @@ -653,8 +673,8 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { }, { name: "Error on unavailable source", - beforeFunc: func(obj *helmv1.HelmChart) { - obj.Spec.SourceRef = helmv1.LocalHelmChartSourceReference{ + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ Name: "unavailable", Kind: sourcev1.GitRepositoryKind, } @@ -663,7 +683,7 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { }, want: sreconcile.ResultEmpty, wantErr: &serror.Generic{Err: errors.New("gitrepositories.source.toolkit.fluxcd.io \"unavailable\" not found")}, - assertFunc: func(g *WithT, build chart.Build, obj helmv1.HelmChart) { + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { g.Expect(build.Complete()).To(BeFalse()) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ @@ -675,8 +695,8 @@ func 
TestHelmChartReconciler_reconcileSource(t *testing.T) { }, { name: "Stalling on unsupported source kind", - beforeFunc: func(obj *helmv1.HelmChart) { - obj.Spec.SourceRef = helmv1.LocalHelmChartSourceReference{ + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ Name: "unavailable", Kind: "Unsupported", } @@ -685,7 +705,7 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { }, want: sreconcile.ResultEmpty, wantErr: &serror.Stalling{Err: errors.New("unsupported source kind 'Unsupported'")}, - assertFunc: func(g *WithT, build chart.Build, obj helmv1.HelmChart) { + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { g.Expect(build.Complete()).To(BeFalse()) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ @@ -706,9 +726,9 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { Artifact: gitArtifact, }, }, - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" - obj.Spec.SourceRef = helmv1.LocalHelmChartSourceReference{ + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ Name: "gitrepository", Kind: sourcev1.GitRepositoryKind, } @@ -718,7 +738,7 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { }, want: sreconcile.ResultEmpty, wantErr: &serror.Stalling{Err: errors.New("values files merge error: no values file found at path")}, - assertFunc: func(g *WithT, build chart.Build, obj helmv1.HelmChart) { + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { g.Expect(build.Complete()).To(BeFalse()) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ @@ -737,9 +757,9 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { }, Status: sourcev1.GitRepositoryStatus{}, }, - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { obj.Spec.Chart = 
"testdata/charts/helmchart-0.1.0.tgz" - obj.Spec.SourceRef = helmv1.LocalHelmChartSourceReference{ + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ Name: "gitrepository", Kind: sourcev1.GitRepositoryKind, } @@ -748,7 +768,7 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") }, want: sreconcile.ResultRequeue, - assertFunc: func(g *WithT, build chart.Build, obj helmv1.HelmChart) { + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { g.Expect(build.Complete()).To(BeFalse()) g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal("foo")) @@ -766,7 +786,7 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&helmv1.HelmChart{}) + WithStatusSubresource(&sourcev1.HelmChart{}) if tt.source != nil { clientBuilder.WithRuntimeObjects(tt.source) @@ -775,17 +795,17 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { r := &HelmChartReconciler{ Client: clientBuilder.Build(), EventRecorder: record.NewFakeRecorder(32), - Storage: storage, + Storage: st, patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } - obj := helmv1.HelmChart{ + obj := sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "chart", Namespace: "default", Generation: 1, }, - Spec: helmv1.HelmChartSpec{}, + Spec: sourcev1.HelmChartSpec{}, } if tt.beforeFunc != nil { tt.beforeFunc(&obj) @@ -851,19 +871,19 @@ func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { name string server options secret *corev1.Secret - beforeFunc func(obj *helmv1.HelmChart, repository *helmv1.HelmRepository) + beforeFunc func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) want sreconcile.Result wantErr error - assertFunc func(g *WithT, obj *helmv1.HelmChart, build chart.Build) + assertFunc func(g *WithT, obj 
*sourcev1.HelmChart, build chart.Build) cleanFunc func(g *WithT, build *chart.Build) }{ { name: "Reconciles chart build", - beforeFunc: func(obj *helmv1.HelmChart, repository *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { obj.Spec.Chart = "helmchart" }, want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, _ *helmv1.HelmChart, build chart.Build) { + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Name).To(Equal(chartName)) g.Expect(build.Version).To(Equal(higherChartVersion)) g.Expect(build.Path).ToNot(BeEmpty()) @@ -888,13 +908,13 @@ func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { "password": []byte("bar"), }, }, - beforeFunc: func(obj *helmv1.HelmChart, repository *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { obj.Spec.Chart = chartName obj.Spec.Version = chartVersion repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"} }, want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, _ *helmv1.HelmChart, build chart.Build) { + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Name).To(Equal(chartName)) g.Expect(build.Version).To(Equal(chartVersion)) g.Expect(build.Path).ToNot(BeEmpty()) @@ -906,32 +926,94 @@ func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { }, { name: "Uses artifact as build cache", - beforeFunc: func(obj *helmv1.HelmChart, repository *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { obj.Spec.Chart = chartName obj.Spec.Version = chartVersion - obj.Status.Artifact = &sourcev1.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + obj.Status.Artifact = &meta.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} }, want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, obj *helmv1.HelmChart, build chart.Build) { + assertFunc: 
func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Name).To(Equal(chartName)) g.Expect(build.Version).To(Equal(chartVersion)) g.Expect(build.Path).To(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) g.Expect(build.Path).To(BeARegularFile()) }, }, + { + name: "Uses artifact as build cache with observedValuesFiles", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + obj.Status.Artifact = &meta.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + obj.Status.ObservedValuesFiles = []string{"values.yaml", "override.yaml"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).To(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(Equal([]string{"values.yaml", "override.yaml"})) + }, + }, { name: "Sets Generation as VersionMetadata with values files", - beforeFunc: func(obj *helmv1.HelmChart, repository *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { obj.Spec.Chart = chartName obj.Generation = 3 obj.Spec.ValuesFiles = []string{"values.yaml", "override.yaml"} }, want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, _ *helmv1.HelmChart, build chart.Build) { + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Name).To(Equal(chartName)) g.Expect(build.Version).To(Equal(higherChartVersion + "+3")) g.Expect(build.Path).ToNot(BeEmpty()) g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(Equal([]string{"values.yaml", "override.yaml"})) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + 
}, + { + name: "Missing values files are an error", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.ValuesFiles = []string{"missing.yaml"} + }, + wantErr: &chart.BuildError{Err: errors.New("values files merge error: failed to merge chart values: no values file found at path 'missing.yaml'")}, + }, + { + name: "All missing values files ignored", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + obj.Spec.ValuesFiles = []string{"missing.yaml"} + obj.Spec.IgnoreMissingValuesFiles = true + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion + "+0")) + g.Expect(build.ValuesFiles).To(BeEmpty()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Partial missing values files ignored", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + obj.Spec.ValuesFiles = []string{"values.yaml", "override.yaml", "invalid.yaml"} + obj.Spec.IgnoreMissingValuesFiles = true + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion + "+0")) + g.Expect(build.ValuesFiles).To(Equal([]string{"values.yaml", "override.yaml"})) }, cleanFunc: func(g *WithT, build *chart.Build) { g.Expect(os.Remove(build.Path)).To(Succeed()) @@ -939,16 +1021,16 @@ func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { }, { name: "Forces build on generation change", - beforeFunc: func(obj *helmv1.HelmChart, repository *helmv1.HelmRepository) { + beforeFunc: func(obj 
*sourcev1.HelmChart, repository *sourcev1.HelmRepository) { obj.Generation = 3 obj.Spec.Chart = chartName obj.Spec.Version = chartVersion obj.Status.ObservedGeneration = 2 - obj.Status.Artifact = &sourcev1.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + obj.Status.Artifact = &meta.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} }, want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, obj *helmv1.HelmChart, build chart.Build) { + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Name).To(Equal(chartName)) g.Expect(build.Version).To(Equal(chartVersion)) g.Expect(build.Path).ToNot(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) @@ -960,29 +1042,29 @@ func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { }, { name: "Event on unsuccessful secret retrieval", - beforeFunc: func(_ *helmv1.HelmChart, repository *helmv1.HelmRepository) { + beforeFunc: func(_ *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { repository.Spec.SecretRef = &meta.LocalObjectReference{ Name: "invalid", } }, want: sreconcile.ResultEmpty, - wantErr: &serror.Generic{Err: errors.New("failed to get authentication secret '/invalid'")}, - assertFunc: func(g *WithT, obj *helmv1.HelmChart, build chart.Build) { + wantErr: &serror.Generic{Err: errors.New("failed to get authentication secret '/invalid': secrets \"invalid\" not found")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Complete()).To(BeFalse()) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get authentication secret '/invalid'"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get authentication secret '/invalid': secrets \"invalid\" not found"), })) }, }, { name: "Stalling on invalid client 
options", - beforeFunc: func(obj *helmv1.HelmChart, repository *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { repository.Spec.URL = "file://unsupported" // Unsupported protocol }, want: sreconcile.ResultEmpty, wantErr: &serror.Stalling{Err: errors.New("scheme \"file\" not supported")}, - assertFunc: func(g *WithT, obj *helmv1.HelmChart, build chart.Build) { + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Complete()).To(BeFalse()) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ @@ -992,12 +1074,12 @@ func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { }, { name: "Stalling on invalid repository URL", - beforeFunc: func(obj *helmv1.HelmChart, repository *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { repository.Spec.URL = "://unsupported" // Invalid URL }, want: sreconcile.ResultEmpty, wantErr: &serror.Stalling{Err: errors.New("missing protocol scheme")}, - assertFunc: func(g *WithT, obj *helmv1.HelmChart, build chart.Build) { + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Complete()).To(BeFalse()) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ @@ -1007,7 +1089,7 @@ func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { }, { name: "BuildError on temporary build error", - beforeFunc: func(obj *helmv1.HelmChart, _ *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { obj.Spec.Chart = "invalid" }, want: sreconcile.ResultEmpty, @@ -1037,42 +1119,42 @@ func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.Scheme()). 
- WithStatusSubresource(&helmv1.HelmChart{}) + WithStatusSubresource(&sourcev1.HelmChart{}) if tt.secret != nil { clientBuilder.WithObjects(tt.secret.DeepCopy()) } - storage, err := newTestStorage(server) + testStorage, err := newTestStorage(server) g.Expect(err).ToNot(HaveOccurred()) r := &HelmChartReconciler{ Client: clientBuilder.Build(), EventRecorder: record.NewFakeRecorder(32), Getters: testGetters, - Storage: storage, + Storage: testStorage, patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } - repository := &helmv1.HelmRepository{ + repository := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "helmrepository-", }, - Spec: helmv1.HelmRepositorySpec{ + Spec: sourcev1.HelmRepositorySpec{ URL: server.URL(), Timeout: &metav1.Duration{Duration: timeout}, }, - Status: helmv1.HelmRepositoryStatus{ - Artifact: &sourcev1.Artifact{ + Status: sourcev1.HelmRepositoryStatus{ + Artifact: &meta.Artifact{ Path: "index.yaml", }, }, } - obj := &helmv1.HelmChart{ + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "helmrepository-", }, - Spec: helmv1.HelmChartSpec{}, + Spec: sourcev1.HelmChartSpec{}, } if tt.beforeFunc != nil { @@ -1116,22 +1198,30 @@ func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { metadata, err := loadTestChartToOCI(chartData, testRegistryServer, "", "", "") g.Expect(err).NotTo(HaveOccurred()) - storage, err := NewStorage(tmpDir, "example.com", retentionTTL, retentionRecords) + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: "example.com", + StorageAdvAddress: "example.com", + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) g.Expect(err).ToNot(HaveOccurred()) - cachedArtifact := &sourcev1.Artifact{ + cachedArtifact := &meta.Artifact{ Revision: "0.1.0", Path: metadata.Name + "-" + metadata.Version + ".tgz", } - 
g.Expect(storage.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) tests := []struct { name string secret *corev1.Secret - beforeFunc func(obj *helmv1.HelmChart, repository *helmv1.HelmRepository) + beforeFunc func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) want sreconcile.Result wantErr error - assertFunc func(g *WithT, obj *helmv1.HelmChart, build chart.Build) + assertFunc func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) cleanFunc func(g *WithT, build *chart.Build) }{ { @@ -1147,13 +1237,13 @@ func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { `auth":"` + base64.StdEncoding.EncodeToString([]byte(testRegistryUsername+":"+testRegistryPassword)) + `"}}}`), }, }, - beforeFunc: func(obj *helmv1.HelmChart, repository *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { obj.Spec.Chart = metadata.Name obj.Spec.Version = metadata.Version repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"} }, want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, _ *helmv1.HelmChart, build chart.Build) { + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Name).To(Equal(metadata.Name)) g.Expect(build.Version).To(Equal(metadata.Version)) g.Expect(build.Path).ToNot(BeEmpty()) @@ -1174,13 +1264,13 @@ func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { "password": []byte(testRegistryPassword), }, }, - beforeFunc: func(obj *helmv1.HelmChart, repository *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { obj.Spec.Chart = metadata.Name obj.Spec.Version = metadata.Version repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"} }, want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, _ *helmv1.HelmChart, build 
chart.Build) { + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Name).To(Equal(metadata.Name)) g.Expect(build.Version).To(Equal(metadata.Version)) g.Expect(build.Path).ToNot(BeEmpty()) @@ -1192,34 +1282,35 @@ func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { }, { name: "Uses artifact as build cache", - beforeFunc: func(obj *helmv1.HelmChart, repository *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { obj.Spec.Chart = metadata.Name obj.Spec.Version = metadata.Version - obj.Status.Artifact = &sourcev1.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} }, want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, obj *helmv1.HelmChart, build chart.Build) { + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Name).To(Equal(metadata.Name)) g.Expect(build.Version).To(Equal(metadata.Version)) - g.Expect(build.Path).To(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(BeEmpty()) }, }, { name: "Forces build on generation change", - beforeFunc: func(obj *helmv1.HelmChart, repository *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { obj.Generation = 3 obj.Spec.Chart = metadata.Name obj.Spec.Version = metadata.Version obj.Status.ObservedGeneration = 2 - obj.Status.Artifact = &sourcev1.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} }, want: sreconcile.ResultSuccess, - assertFunc: func(g *WithT, obj *helmv1.HelmChart, build chart.Build) { + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build 
chart.Build) { g.Expect(build.Name).To(Equal(metadata.Name)) g.Expect(build.Version).To(Equal(metadata.Version)) - g.Expect(build.Path).ToNot(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).ToNot(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) g.Expect(build.Path).To(BeARegularFile()) }, cleanFunc: func(g *WithT, build *chart.Build) { @@ -1228,29 +1319,29 @@ func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { }, { name: "Event on unsuccessful secret retrieval", - beforeFunc: func(_ *helmv1.HelmChart, repository *helmv1.HelmRepository) { + beforeFunc: func(_ *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { repository.Spec.SecretRef = &meta.LocalObjectReference{ Name: "invalid", } }, want: sreconcile.ResultEmpty, - wantErr: &serror.Generic{Err: errors.New("failed to get authentication secret '/invalid'")}, - assertFunc: func(g *WithT, obj *helmv1.HelmChart, build chart.Build) { + wantErr: &serror.Generic{Err: errors.New("failed to get authentication secret '/invalid': secrets \"invalid\" not found")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Complete()).To(BeFalse()) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get authentication secret '/invalid'"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get authentication secret '/invalid': secrets \"invalid\" not found"), })) }, }, { name: "Stalling on invalid client options", - beforeFunc: func(obj *helmv1.HelmChart, repository *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { repository.Spec.URL = "https://unsupported" // Unsupported protocol }, want: sreconcile.ResultEmpty, wantErr: &serror.Stalling{Err: errors.New("failed to construct Helm client: 
invalid OCI registry URL: https://unsupported")}, - assertFunc: func(g *WithT, obj *helmv1.HelmChart, build chart.Build) { + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Complete()).To(BeFalse()) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ @@ -1260,7 +1351,7 @@ func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { }, { name: "BuildError on temporary build error", - beforeFunc: func(obj *helmv1.HelmChart, _ *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { obj.Spec.Chart = "invalid" }, want: sreconcile.ResultEmpty, @@ -1273,7 +1364,7 @@ func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.Scheme()). - WithStatusSubresource(&helmv1.HelmChart{}) + WithStatusSubresource(&sourcev1.HelmChart{}) if tt.secret != nil { clientBuilder.WithObjects(tt.secret.DeepCopy()) @@ -1283,28 +1374,28 @@ func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { Client: clientBuilder.Build(), EventRecorder: record.NewFakeRecorder(32), Getters: testGetters, - Storage: storage, + Storage: st, RegistryClientGenerator: registry.ClientGenerator, patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } - repository := &helmv1.HelmRepository{ + repository := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "helmrepository-", }, - Spec: helmv1.HelmRepositorySpec{ + Spec: sourcev1.HelmRepositorySpec{ URL: fmt.Sprintf("oci://%s/testrepo", testRegistryServer.registryHost), Timeout: &metav1.Duration{Duration: timeout}, - Provider: helmv1.GenericOCIProvider, - Type: helmv1.HelmRepositoryTypeOCI, + Provider: sourcev1.GenericOCIProvider, + Type: sourcev1.HelmRepositoryTypeOCI, Insecure: true, }, } - obj := &helmv1.HelmChart{ + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "helmrepository-", }, - 
Spec: helmv1.HelmChartSpec{}, + Spec: sourcev1.HelmChartSpec{}, } if tt.beforeFunc != nil { @@ -1338,37 +1429,37 @@ func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { tmpDir := t.TempDir() - // Unpatch the changes we make to the default DNS resolver in `setupRegistryServer()`. - // This is required because the changes somehow also cause remote lookups to fail and - // this test tests functionality related to remote dependencies. - mockdns.UnpatchNet(net.DefaultResolver) - defer func() { - testRegistryServer.dnsServer.PatchNet(net.DefaultResolver) - }() - - storage, err := NewStorage(tmpDir, "example.com", retentionTTL, retentionRecords) + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: "example.com", + StorageAdvAddress: "example.com", + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) g.Expect(err).ToNot(HaveOccurred()) - chartsArtifact := &sourcev1.Artifact{ + chartsArtifact := &meta.Artifact{ Revision: "mock-ref/abcdefg12345678", Path: "mock.tgz", } - g.Expect(storage.Archive(chartsArtifact, "testdata/charts", nil)).To(Succeed()) - yamlArtifact := &sourcev1.Artifact{ + g.Expect(st.Archive(chartsArtifact, "testdata/charts", nil)).To(Succeed()) + yamlArtifact := &meta.Artifact{ Revision: "9876abcd", Path: "values.yaml", } - g.Expect(storage.CopyFromPath(yamlArtifact, "testdata/charts/helmchart/values.yaml")).To(Succeed()) - cachedArtifact := &sourcev1.Artifact{ + g.Expect(st.CopyFromPath(yamlArtifact, "testdata/charts/helmchart/values.yaml")).To(Succeed()) + cachedArtifact := &meta.Artifact{ Revision: "0.1.0", Path: "cached.tgz", } - g.Expect(storage.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) tests := []struct { name string - source sourcev1.Artifact - beforeFunc func(obj 
*helmv1.HelmChart) + source meta.Artifact + beforeFunc func(obj *sourcev1.HelmChart) want sreconcile.Result wantErr error assertFunc func(g *WithT, build chart.Build) @@ -1377,7 +1468,7 @@ func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { { name: "Resolves chart dependencies and builds", source: *chartsArtifact.DeepCopy(), - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { obj.Spec.Chart = "testdata/charts/helmchartwithdeps" }, want: sreconcile.ResultSuccess, @@ -1399,10 +1490,10 @@ func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { { name: "ReconcileStrategyRevision sets VersionMetadata", source: *chartsArtifact.DeepCopy(), - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { obj.Spec.Chart = "testdata/charts/helmchart" obj.Spec.SourceRef.Kind = sourcev1.GitRepositoryKind - obj.Spec.ReconcileStrategy = helmv1.ReconcileStrategyRevision + obj.Spec.ReconcileStrategy = sourcev1.ReconcileStrategyRevision }, want: sreconcile.ResultSuccess, assertFunc: func(g *WithT, build chart.Build) { @@ -1418,7 +1509,7 @@ func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { { name: "ValuesFiles sets Generation as VersionMetadata", source: *chartsArtifact.DeepCopy(), - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { obj.Generation = 3 obj.Spec.Chart = "testdata/charts/helmchart" obj.Spec.SourceRef.Kind = sourcev1.GitRepositoryKind @@ -1433,6 +1524,10 @@ func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { g.Expect(build.Version).To(Equal("0.1.0+3")) g.Expect(build.ResolvedDependencies).To(Equal(0)) g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(Equal([]string{ + "testdata/charts/helmchart/values.yaml", + "testdata/charts/helmchart/override.yaml", + })) }, cleanFunc: func(g *WithT, build *chart.Build) { g.Expect(os.Remove(build.Path)).To(Succeed()) @@ -1441,22 +1536,40 @@ 
func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { { name: "Chart from storage cache", source: *chartsArtifact.DeepCopy(), - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Status.Artifact = cachedArtifact.DeepCopy() + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).To(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(BeEmpty()) + }, + }, + { + name: "Chart from storage cache with ObservedValuesFiles", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" obj.Status.Artifact = cachedArtifact.DeepCopy() + obj.Status.ObservedValuesFiles = []string{"values.yaml", "override.yaml"} }, want: sreconcile.ResultSuccess, assertFunc: func(g *WithT, build chart.Build) { g.Expect(build.Name).To(Equal("helmchart")) g.Expect(build.Version).To(Equal("0.1.0")) - g.Expect(build.Path).To(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(Equal([]string{"values.yaml", "override.yaml"})) }, }, { name: "Generation change forces rebuild", source: *chartsArtifact.DeepCopy(), - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { obj.Generation = 2 obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" obj.Status.Artifact = cachedArtifact.DeepCopy() @@ -1466,8 +1579,9 @@ func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { assertFunc: func(g *WithT, build chart.Build) { g.Expect(build.Name).To(Equal("helmchart")) g.Expect(build.Version).To(Equal("0.1.0")) - 
g.Expect(build.Path).ToNot(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).ToNot(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) g.Expect(build.Path).To(BeARegularFile()) + g.Expect(build.ValuesFiles).To(BeEmpty()) }, cleanFunc: func(g *WithT, build *chart.Build) { g.Expect(os.Remove(build.Path)).To(Succeed()) @@ -1475,7 +1589,7 @@ func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { }, { name: "Empty source artifact", - source: sourcev1.Artifact{}, + source: meta.Artifact{}, want: sreconcile.ResultEmpty, wantErr: &serror.Generic{Err: errors.New("no such file or directory")}, assertFunc: func(g *WithT, build chart.Build) { @@ -1499,21 +1613,21 @@ func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { r := &HelmChartReconciler{ Client: fakeclient.NewClientBuilder(). WithScheme(testEnv.Scheme()). - WithStatusSubresource(&helmv1.HelmChart{}). + WithStatusSubresource(&sourcev1.HelmChart{}). Build(), EventRecorder: record.NewFakeRecorder(32), - Storage: storage, + Storage: st, Getters: testGetters, RegistryClientGenerator: registry.ClientGenerator, patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } - obj := &helmv1.HelmChart{ + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ Name: "artifact", Namespace: "default", }, - Spec: helmv1.HelmChartSpec{}, + Spec: sourcev1.HelmChartSpec{}, } if tt.beforeFunc != nil { tt.beforeFunc(obj) @@ -1546,16 +1660,16 @@ func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { tests := []struct { name string build *chart.Build - beforeFunc func(obj *helmv1.HelmChart) + beforeFunc func(obj *sourcev1.HelmChart) want sreconcile.Result wantErr bool assertConditions []metav1.Condition - afterFunc func(t *WithT, obj *helmv1.HelmChart) + afterFunc func(t *WithT, obj *sourcev1.HelmChart) }{ { name: "Incomplete build requeues and does not update status", build: &chart.Build{}, - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj 
*sourcev1.HelmChart) { conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") }, want: sreconcile.ResultRequeue, @@ -1565,20 +1679,21 @@ func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { }, { name: "Copying artifact to storage from build makes ArtifactInStorage=True", - build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"), - beforeFunc: func(obj *helmv1.HelmChart) { + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", nil), + beforeFunc: func(obj *sourcev1.HelmChart) { conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") }, - afterFunc: func(t *WithT, obj *helmv1.HelmChart) { + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) t.Expect(obj.Status.URL).ToNot(BeEmpty()) t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + t.Expect(obj.Status.ObservedValuesFiles).To(BeNil()) }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, helmv1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), }, }, { @@ -1588,15 +1703,16 @@ func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { Version: "0.1.0", Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"), }, - beforeFunc: func(obj *helmv1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{ Path: "testdata/charts/helmchart-0.1.0.tgz", } }, want: sreconcile.ResultSuccess, - afterFunc: func(t *WithT, obj 
*helmv1.HelmChart) { + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { t.Expect(obj.Status.Artifact.Path).To(Equal("testdata/charts/helmchart-0.1.0.tgz")) t.Expect(obj.Status.ObservedChartName).To(BeEmpty()) + t.Expect(obj.Status.ObservedValuesFiles).To(BeNil()) t.Expect(obj.Status.URL).To(BeEmpty()) }, }, @@ -1608,44 +1724,45 @@ func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"), Packaged: true, }, - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { obj.Status.ObservedChartName = "helmchart" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: "0.1.0", Path: "testdata/charts/helmchart-0.1.0.tgz", } }, want: sreconcile.ResultSuccess, - afterFunc: func(t *WithT, obj *helmv1.HelmChart) { + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { t.Expect(obj.Status.Artifact.Path).To(Equal("testdata/charts/helmchart-0.1.0.tgz")) t.Expect(obj.Status.URL).To(BeEmpty()) }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, helmv1.ChartPackageSucceededReason, "packaged 'helmchart' chart with version '0.1.0'"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPackageSucceededReason, "packaged 'helmchart' chart with version '0.1.0'"), }, }, { name: "Removes ArtifactOutdatedCondition after creating new artifact", - build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"), - beforeFunc: func(obj *helmv1.HelmChart) { + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", nil), + beforeFunc: func(obj *sourcev1.HelmChart) { conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") }, - afterFunc: func(t *WithT, obj *helmv1.HelmChart) { + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) 
t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) t.Expect(obj.Status.URL).ToNot(BeEmpty()) t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + t.Expect(obj.Status.ObservedValuesFiles).To(BeNil()) }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, helmv1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), }, }, { name: "Creates latest symlink to the created artifact", - build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"), - afterFunc: func(t *WithT, obj *helmv1.HelmChart) { + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", nil), + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { t.Expect(obj.GetArtifact()).ToNot(BeNil()) localPath := testStorage.LocalPath(*obj.GetArtifact()) @@ -1656,7 +1773,47 @@ func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, helmv1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Updates ObservedValuesFiles after creating new artifact", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", []string{"values.yaml", "override.yaml"}), + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { 
+ t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) + t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + t.Expect(obj.Status.ObservedValuesFiles).To(BeNil()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Updates ObservedValuesFiles with IgnoreMissingValuesFiles after creating new artifact", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz", []string{"values.yaml", "override.yaml"}), + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + obj.Spec.ValuesFiles = []string{"values.yaml", "missing.yaml", "override.yaml"} + obj.Spec.IgnoreMissingValuesFiles = true + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) + t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + t.Expect(obj.Status.ObservedValuesFiles).To(Equal([]string{"values.yaml", "override.yaml"})) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), }, }, } @@ -1668,19 +1825,19 @@ func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { r := &HelmChartReconciler{ Client: fakeclient.NewClientBuilder(). 
WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&helmv1.HelmChart{}). + WithStatusSubresource(&sourcev1.HelmChart{}). Build(), EventRecorder: record.NewFakeRecorder(32), Storage: testStorage, patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } - obj := &helmv1.HelmChart{ + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "reconcile-artifact-", Generation: 1, }, - Status: helmv1.HelmChartStatus{}, + Status: sourcev1.HelmChartStatus{}, } if tt.beforeFunc != nil { tt.beforeFunc(obj) @@ -1706,10 +1863,10 @@ func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { func TestHelmChartReconciler_getSource(t *testing.T) { mocks := []client.Object{ - &helmv1.HelmRepository{ + &sourcev1.HelmRepository{ TypeMeta: metav1.TypeMeta{ - Kind: helmv1.HelmRepositoryKind, - APIVersion: helmv1.GroupVersion.String(), + Kind: sourcev1.HelmRepositoryKind, + APIVersion: sourcev1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: "helmrepository", @@ -1726,10 +1883,10 @@ func TestHelmChartReconciler_getSource(t *testing.T) { Namespace: "foo", }, }, - &helmv1.Bucket{ + &sourcev1.Bucket{ TypeMeta: metav1.TypeMeta{ - Kind: helmv1.BucketKind, - APIVersion: helmv1.GroupVersion.String(), + Kind: sourcev1.BucketKind, + APIVersion: sourcev1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: "bucket", @@ -1739,7 +1896,7 @@ func TestHelmChartReconciler_getSource(t *testing.T) { } clientBuilder := fakeclient.NewClientBuilder(). - WithStatusSubresource(&helmv1.HelmChart{}). + WithStatusSubresource(&sourcev1.HelmChart{}). WithObjects(mocks...) 
r := &HelmChartReconciler{ @@ -1749,18 +1906,18 @@ func TestHelmChartReconciler_getSource(t *testing.T) { tests := []struct { name string - obj *helmv1.HelmChart + obj *sourcev1.HelmChart want sourcev1.Source wantErr bool }{ { name: "Get HelmRepository source for reference", - obj: &helmv1.HelmChart{ + obj: &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ Namespace: mocks[0].GetNamespace(), }, - Spec: helmv1.HelmChartSpec{ - SourceRef: helmv1.LocalHelmChartSourceReference{ + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ Name: mocks[0].GetName(), Kind: mocks[0].GetObjectKind().GroupVersionKind().Kind, }, @@ -1770,12 +1927,12 @@ func TestHelmChartReconciler_getSource(t *testing.T) { }, { name: "Get GitRepository source for reference", - obj: &helmv1.HelmChart{ + obj: &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ Namespace: mocks[1].GetNamespace(), }, - Spec: helmv1.HelmChartSpec{ - SourceRef: helmv1.LocalHelmChartSourceReference{ + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ Name: mocks[1].GetName(), Kind: mocks[1].GetObjectKind().GroupVersionKind().Kind, }, @@ -1785,12 +1942,12 @@ func TestHelmChartReconciler_getSource(t *testing.T) { }, { name: "Get Bucket source for reference", - obj: &helmv1.HelmChart{ + obj: &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ Namespace: mocks[2].GetNamespace(), }, - Spec: helmv1.HelmChartSpec{ - SourceRef: helmv1.LocalHelmChartSourceReference{ + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ Name: mocks[2].GetName(), Kind: mocks[2].GetObjectKind().GroupVersionKind().Kind, }, @@ -1800,12 +1957,12 @@ func TestHelmChartReconciler_getSource(t *testing.T) { }, { name: "Error on client error", - obj: &helmv1.HelmChart{ + obj: &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ Namespace: mocks[2].GetNamespace(), }, - Spec: helmv1.HelmChartSpec{ - SourceRef: helmv1.LocalHelmChartSourceReference{ + Spec: 
sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ Name: mocks[1].GetName(), Kind: mocks[2].GetObjectKind().GroupVersionKind().Kind, }, @@ -1815,9 +1972,9 @@ func TestHelmChartReconciler_getSource(t *testing.T) { }, { name: "Error on unsupported source kind", - obj: &helmv1.HelmChart{ - Spec: helmv1.HelmChartSpec{ - SourceRef: helmv1.LocalHelmChartSourceReference{ + obj: &sourcev1.HelmChart{ + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ Name: "unsupported", Kind: "Unsupported", }, @@ -1838,7 +1995,18 @@ func TestHelmChartReconciler_getSource(t *testing.T) { return } - g.Expect(got).To(Equal(tt.want)) + // TODO(stefan): Remove this workaround when the controller-runtime fake client restores TypeMeta + // https://github.com/kubernetes-sigs/controller-runtime/issues/3302 + unstructuredGot, err := runtime.DefaultUnstructuredConverter.ToUnstructured(got) + g.Expect(err).ToNot(HaveOccurred()) + gotName, _, err := unstructured.NestedFieldCopy(unstructuredGot, "metadata", "name") + g.Expect(err).ToNot(HaveOccurred()) + unstructuredWant, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tt.want) + g.Expect(err).ToNot(HaveOccurred()) + wantName, _, err := unstructured.NestedFieldCopy(unstructuredWant, "metadata", "name") + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(gotName).To(Equal(wantName)) g.Expect(err).ToNot(HaveOccurred()) }) } @@ -1853,7 +2021,7 @@ func TestHelmChartReconciler_reconcileDelete(t *testing.T) { patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } - obj := &helmv1.HelmChart{ + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ Name: "reconcile-delete-", DeletionTimestamp: &metav1.Time{Time: time.Now()}, @@ -1861,10 +2029,10 @@ func TestHelmChartReconciler_reconcileDelete(t *testing.T) { sourcev1.SourceFinalizer, }, }, - Status: helmv1.HelmChartStatus{}, + Status: sourcev1.HelmChartStatus{}, } - artifact := 
testStorage.NewArtifactFor(helmv1.HelmChartKind, obj.GetObjectMeta(), "revision", "foo.txt") + artifact := testStorage.NewArtifactFor(sourcev1.HelmChartKind, obj.GetObjectMeta(), "revision", "foo.txt") obj.Status.Artifact = &artifact got, err := r.reconcileDelete(ctx, obj) @@ -1877,7 +2045,7 @@ func TestHelmChartReconciler_reconcileDelete(t *testing.T) { func TestHelmChartReconciler_reconcileSubRecs(t *testing.T) { // Helper to build simple helmChartReconcileFunc with result and error. buildReconcileFuncs := func(r sreconcile.Result, e error) helmChartReconcileFunc { - return func(_ context.Context, _ *patch.SerialPatcher, _ *helmv1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + return func(_ context.Context, _ *patch.SerialPatcher, _ *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { return r, e } } @@ -1932,11 +2100,11 @@ func TestHelmChartReconciler_reconcileSubRecs(t *testing.T) { { name: "multiple object status conditions mutations", reconcileFuncs: []helmChartReconcileFunc{ - func(_ context.Context, _ *patch.SerialPatcher, obj *helmv1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + func(_ context.Context, _ *patch.SerialPatcher, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") return sreconcile.ResultSuccess, nil }, - func(_ context.Context, _ *patch.SerialPatcher, obj *helmv1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + func(_ context.Context, _ *patch.SerialPatcher, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { conditions.MarkTrue(obj, meta.ReconcilingCondition, "Progressing", "creating artifact") return sreconcile.ResultSuccess, nil }, @@ -1986,16 +2154,16 @@ func TestHelmChartReconciler_reconcileSubRecs(t *testing.T) { r := &HelmChartReconciler{ Client: fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&helmv1.HelmChart{}). 
+ WithStatusSubresource(&sourcev1.HelmChart{}). Build(), patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } - obj := &helmv1.HelmChart{ + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-", Generation: tt.generation, }, - Status: helmv1.HelmChartStatus{ + Status: sourcev1.HelmChartStatus{ ObservedGeneration: tt.observedGeneration, }, } @@ -2016,7 +2184,7 @@ func TestHelmChartReconciler_reconcileSubRecs(t *testing.T) { } } -func mockChartBuild(name, version, path string) *chart.Build { +func mockChartBuild(name, version, path string, valuesFiles []string) *chart.Build { var copyP string if path != "" { f, err := os.Open(path) @@ -2032,22 +2200,23 @@ func mockChartBuild(name, version, path string) *chart.Build { } } return &chart.Build{ - Name: name, - Version: version, - Path: copyP, + Name: name, + Version: version, + Path: copyP, + ValuesFiles: valuesFiles, } } func TestHelmChartReconciler_statusConditions(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *helmv1.HelmChart) + beforeFunc func(obj *sourcev1.HelmChart) assertConditions []metav1.Condition wantErr bool }{ { name: "positive conditions only", - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") }, assertConditions: []metav1.Condition{ @@ -2057,7 +2226,7 @@ func TestHelmChartReconciler_statusConditions(t *testing.T) { }, { name: "multiple failures", - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, "ChartPackageError", "some error") @@ -2074,7 
+2243,7 @@ func TestHelmChartReconciler_statusConditions(t *testing.T) { }, { name: "mixed positive and negative conditions", - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") }, @@ -2091,10 +2260,10 @@ func TestHelmChartReconciler_statusConditions(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - obj := &helmv1.HelmChart{ + obj := &sourcev1.HelmChart{ TypeMeta: metav1.TypeMeta{ - Kind: helmv1.HelmChartKind, - APIVersion: helmv1.GroupVersion.String(), + Kind: sourcev1.HelmChartKind, + APIVersion: sourcev1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: "helmchart", @@ -2104,7 +2273,7 @@ func TestHelmChartReconciler_statusConditions(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithObjects(obj). 
- WithStatusSubresource(&helmv1.HelmChart{}) + WithStatusSubresource(&sourcev1.HelmChart{}) c := clientBuilder.Build() @@ -2141,8 +2310,8 @@ func TestHelmChartReconciler_notify(t *testing.T) { name string res sreconcile.Result resErr error - oldObjBeforeFunc func(obj *helmv1.HelmChart) - newObjBeforeFunc func(obj *helmv1.HelmChart) + oldObjBeforeFunc func(obj *sourcev1.HelmChart) + newObjBeforeFunc func(obj *sourcev1.HelmChart) wantEvent string }{ { @@ -2154,8 +2323,8 @@ func TestHelmChartReconciler_notify(t *testing.T) { name: "new artifact", res: sreconcile.ResultSuccess, resErr: nil, - newObjBeforeFunc: func(obj *helmv1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + newObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} }, wantEvent: "Normal ChartPackageSucceeded packaged", }, @@ -2163,13 +2332,13 @@ func TestHelmChartReconciler_notify(t *testing.T) { name: "recovery from failure", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *helmv1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, - newObjBeforeFunc: func(obj *helmv1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + newObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal ChartPackageSucceeded packaged", @@ -2178,13 +2347,13 @@ func TestHelmChartReconciler_notify(t *testing.T) { name: "recovery and new artifact", res: sreconcile.ResultSuccess, 
resErr: nil, - oldObjBeforeFunc: func(obj *helmv1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, - newObjBeforeFunc: func(obj *helmv1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Digest: "bbb"} + newObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal ChartPackageSucceeded packaged", @@ -2193,12 +2362,12 @@ func TestHelmChartReconciler_notify(t *testing.T) { name: "no updates", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *helmv1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, - newObjBeforeFunc: func(obj *helmv1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + newObjBeforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, }, @@ -2209,7 +2378,7 @@ func TestHelmChartReconciler_notify(t *testing.T) { g := NewWithT(t) recorder := record.NewFakeRecorder(32) - oldObj := &helmv1.HelmChart{} + oldObj := &sourcev1.HelmChart{} newObj := oldObj.DeepCopy() if tt.oldObjBeforeFunc != nil { @@ -2386,7 +2555,7 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_authStrategy(t *testing.T) { }, }, assertConditions: 
[]metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "unknown build error: failed to construct Helm client's TLS config: cannot append certificate into certificate pool: invalid CA certificate"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "unknown build error: failed to construct Helm client's TLS config: failed to parse CA certificate"), }, }, { @@ -2439,7 +2608,7 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_authStrategy(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&helmv1.HelmChart{}) + WithStatusSubresource(&sourcev1.HelmChart{}) workspaceDir := t.TempDir() @@ -2457,15 +2626,15 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_authStrategy(t *testing.T) { metadata, err := loadTestChartToOCI(chartData, server, "testdata/certs/client.pem", "testdata/certs/client-key.pem", "testdata/certs/ca.pem") g.Expect(err).ToNot(HaveOccurred()) - repo := &helmv1.HelmRepository{ + repo := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "auth-strategy-", }, - Spec: helmv1.HelmRepositorySpec{ + Spec: sourcev1.HelmRepositorySpec{ Interval: metav1.Duration{Duration: interval}, Timeout: &metav1.Duration{Duration: timeout}, - Type: helmv1.HelmRepositoryTypeOCI, - Provider: helmv1.GenericOCIProvider, + Type: sourcev1.HelmRepositoryTypeOCI, + Provider: sourcev1.GenericOCIProvider, URL: fmt.Sprintf("oci://%s/testrepo", server.registryHost), Insecure: tt.insecure, }, @@ -2502,15 +2671,15 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_authStrategy(t *testing.T) { clientBuilder.WithObjects(repo) - obj := &helmv1.HelmChart{ + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "auth-strategy-", }, - Spec: helmv1.HelmChartSpec{ + Spec: sourcev1.HelmChartSpec{ Chart: metadata.Name, Version: metadata.Version, - SourceRef: helmv1.LocalHelmChartSourceReference{ - Kind: 
helmv1.HelmRepositoryKind, + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, Name: repo.Name, }, Interval: metav1.Duration{Duration: interval}, @@ -2564,7 +2733,7 @@ func TestHelmChartRepository_reconcileSource_verifyOCISourceSignature_keyless(t version string want sreconcile.Result wantErr bool - beforeFunc func(obj *helmv1.HelmChart) + beforeFunc func(obj *sourcev1.HelmChart) assertConditions []metav1.Condition revision string }{ @@ -2583,8 +2752,8 @@ func TestHelmChartRepository_reconcileSource_verifyOCISourceSignature_keyless(t name: "signed image with correct subject and issuer should pass verification", version: "6.5.1", want: sreconcile.ResultSuccess, - beforeFunc: func(obj *helmv1.HelmChart) { - obj.Spec.Verify.MatchOIDCIdentity = []helmv1.OIDCIdentityMatch{ + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ { Subject: "^https://github.com/stefanprodan/podinfo.*$", @@ -2603,8 +2772,8 @@ func TestHelmChartRepository_reconcileSource_verifyOCISourceSignature_keyless(t name: "signed image with incorrect and correct identity matchers should pass verification", version: "6.5.1", want: sreconcile.ResultSuccess, - beforeFunc: func(obj *helmv1.HelmChart) { - obj.Spec.Verify.MatchOIDCIdentity = []helmv1.OIDCIdentityMatch{ + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ { Subject: "intruder", Issuer: "^https://honeypot.com$", @@ -2628,8 +2797,8 @@ func TestHelmChartRepository_reconcileSource_verifyOCISourceSignature_keyless(t version: "6.5.1", wantErr: true, want: sreconcile.ResultEmpty, - beforeFunc: func(obj *helmv1.HelmChart) { - obj.Spec.Verify.MatchOIDCIdentity = []helmv1.OIDCIdentityMatch{ + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ { Subject: "intruder", Issuer: "^https://honeypot.com$", @@ -2648,8 +2817,8 @@ 
func TestHelmChartRepository_reconcileSource_verifyOCISourceSignature_keyless(t wantErr: true, want: sreconcile.ResultEmpty, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no matching signatures"), - *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no matching signatures"), + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no signatures found"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no signatures found"), }, revision: "6.1.0@sha256:642383f56ccb529e3f658d40312d01b58d9bc6caeef653da43e58d1afe88982a", }, @@ -2661,15 +2830,15 @@ func TestHelmChartRepository_reconcileSource_verifyOCISourceSignature_keyless(t clientBuilder := fakeclient.NewClientBuilder() - repository := &helmv1.HelmRepository{ + repository := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "helmrepository-", }, - Spec: helmv1.HelmRepositorySpec{ + Spec: sourcev1.HelmRepositorySpec{ URL: "oci://ghcr.io/stefanprodan/charts", Timeout: &metav1.Duration{Duration: timeout}, - Provider: helmv1.GenericOCIProvider, - Type: helmv1.HelmRepositoryTypeOCI, + Provider: sourcev1.GenericOCIProvider, + Type: sourcev1.HelmRepositoryTypeOCI, }, } clientBuilder.WithObjects(repository) @@ -2683,18 +2852,18 @@ func TestHelmChartRepository_reconcileSource_verifyOCISourceSignature_keyless(t patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } - obj := &helmv1.HelmChart{ + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "helmchart-", }, - Spec: helmv1.HelmChartSpec{ - SourceRef: helmv1.LocalHelmChartSourceReference{ - Kind: helmv1.HelmRepositoryKind, + Spec: sourcev1.HelmChartSpec{ + 
SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, Name: repository.Name, }, Version: tt.version, Chart: "podinfo", - Verify: &helmv1.OCIRepositoryVerification{ + Verify: &sourcev1.OCIRepositoryVerification{ Provider: "cosign", }, }, @@ -2733,7 +2902,7 @@ func TestHelmChartRepository_reconcileSource_verifyOCISourceSignature_keyless(t } } -func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignature(t *testing.T) { +func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureNotation(t *testing.T) { g := NewWithT(t) tmpDir := t.TempDir() @@ -2755,14 +2924,352 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignature(t *testing.T metadata, err := loadTestChartToOCI(chartData, server, "", "", "") g.Expect(err).NotTo(HaveOccurred()) - storage, err := NewStorage(tmpDir, "example.com", retentionTTL, retentionRecords) + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: server.registryHost, + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) g.Expect(err).ToNot(HaveOccurred()) - cachedArtifact := &sourcev1.Artifact{ + cachedArtifact := &meta.Artifact{ Revision: "0.1.0", Path: metadata.Name + "-" + metadata.Version + ".tgz", } - g.Expect(storage.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + + certTuple := testhelper.GetRSASelfSignedSigningCertTuple("notation self-signed certs for testing") + certs := []*x509.Certificate{certTuple.Cert} + + sg, err := signer.New(certTuple.PrivateKey, certs) + g.Expect(err).ToNot(HaveOccurred()) + + policyDocument := trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: 
trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelStrict.Name, Override: map[trustpolicy.ValidationType]trustpolicy.ValidationAction{trustpolicy.TypeRevocation: trustpolicy.ActionSkip}}, + TrustStores: []string{"ca:valid-trust-store"}, + TrustedIdentities: []string{"*"}, + }, + }, + } + + tests := []struct { + name string + shouldSign bool + beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr bool + wantErrMsg string + addMultipleCerts bool + provideNoCert bool + provideNoPolicy bool + assertConditions []metav1.Condition + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "unsigned charts should not pass verification", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + want: sreconcile.ResultEmpty, + wantErr: true, + wantErrMsg: "chart verification error: failed to verify : no signature", + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no signature"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no signature"), + }, + }, + { + name: "signed charts should pass verification", + shouldSign: true, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version "), + 
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "multiple certs should still pass verification", + addMultipleCerts: true, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "verify failed before, removed from spec, remove condition", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = nil + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewChart", "pulled '' chart with version ''"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: 
pulled '' chart with version ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: pulled '' chart with version ''"), + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "no cert provided should not pass verification", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + wantErr: true, + provideNoCert: true, + // no namespace but the namespace name should appear before the /notation-config + wantErrMsg: "failed to verify the signature using provider 'notation': no certificates found in secret '/notation-config'", + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "failed to verify the signature using provider 'notation': no certificates found in secret '/notation-config'"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider 'notation': no certificates found in secret '/notation-config'"), + }, + }, + { + name: "empty string should fail verification", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = metadata.Name + obj.Spec.Version = metadata.Version + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + SecretRef: &meta.LocalObjectReference{Name: "notation-config"}, + } + }, + provideNoPolicy: true, + wantErr: true, + wantErrMsg: fmt.Sprintf("failed to verify the signature using provider 'notation': '%s' not found in secret '/notation-config'", snotation.DefaultTrustPolicyKey), + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, 
"Unknown", "failed to verify the signature using provider 'notation': '%s' not found in secret '/notation-config'", snotation.DefaultTrustPolicyKey), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider 'notation': '%s' not found in secret '/notation-config'", snotation.DefaultTrustPolicyKey), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fakeclient.NewClientBuilder() + + repository := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: fmt.Sprintf("oci://%s/testrepo", server.registryHost), + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GenericOCIProvider, + Type: sourcev1.HelmRepositoryTypeOCI, + Insecure: true, + }, + } + + policy, err := json.Marshal(policyDocument) + g.Expect(err).NotTo(HaveOccurred()) + + data := map[string][]byte{} + + if tt.addMultipleCerts { + data["a.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("a not used for signing").Cert.Raw + data["b.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("b not used for signing").Cert.Raw + data["c.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("c not used for signing").Cert.Raw + } + + if !tt.provideNoCert { + data["notation.crt"] = certTuple.Cert.Raw + } + + if !tt.provideNoPolicy { + data["trustpolicy.json"] = policy + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "notation-config", + }, + Data: data, + } + + caSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-trust-store", + Generation: 1, + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + } + + clientBuilder.WithObjects(repository, secret, caSecret) + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + Storage: st, + 
RegistryClientGenerator: registry.ClientGenerator, + patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmchart-", + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, + Name: repository.Name, + }, + }, + } + + chartUrl := fmt.Sprintf("oci://%s/testrepo/%s:%s", server.registryHost, metadata.Name, metadata.Version) + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + if tt.shouldSign { + artifact := fmt.Sprintf("%s/testrepo/%s:%s", server.registryHost, metadata.Name, metadata.Version) + + remoteRepo, err := oras.NewRepository(artifact) + g.Expect(err).ToNot(HaveOccurred()) + + remoteRepo.PlainHTTP = true + + repo := nr.NewRepository(remoteRepo) + + signatureMediaType := cose.MediaTypeEnvelope + + signOptions := notation.SignOptions{ + SignerSignOptions: notation.SignerSignOptions{ + SignatureMediaType: signatureMediaType, + }, + ArtifactReference: artifact, + } + + _, err = notation.Sign(ctx, sg, repo, signOptions) + g.Expect(err).ToNot(HaveOccurred()) + } + + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", metadata.Name) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", metadata.Version) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", chartUrl) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "notation") + } + + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + + g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(context.TODO(), obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + got, err := r.reconcileSource(ctx, sp, obj, &b) + if tt.wantErr { + 
tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", chartUrl) + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureCosign(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + server, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + const ( + chartPath = "testdata/charts/helmchart-0.1.0.tgz" + ) + + // Load a test chart + chartData, err := os.ReadFile(chartPath) + g.Expect(err).ToNot(HaveOccurred()) + + // Upload the test chart + metadata, err := loadTestChartToOCI(chartData, server, "", "", "") + g.Expect(err).NotTo(HaveOccurred()) + + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: server.registryHost, + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) + g.Expect(err).ToNot(HaveOccurred()) + + cachedArtifact := &meta.Artifact{ + Revision: "0.1.0", + Path: metadata.Name + "-" + metadata.Version + ".tgz", + } + g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) pf := func(b bool) ([]byte, error) { return []byte("cosign-password"), nil @@ -2782,7 +3289,7 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignature(t *testing.T tests := []struct { name string shouldSign bool - beforeFunc func(obj *helmv1.HelmChart) + beforeFunc func(obj *sourcev1.HelmChart) want sreconcile.Result wantErr bool wantErrMsg string @@ -2791,45 +3298,45 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignature(t *testing.T }{ { name: "unsigned charts should not pass verification", - beforeFunc: 
func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { obj.Spec.Chart = metadata.Name obj.Spec.Version = metadata.Version - obj.Spec.Verify = &helmv1.OCIRepositoryVerification{ + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ Provider: "cosign", SecretRef: &meta.LocalObjectReference{Name: "cosign-key"}, } }, want: sreconcile.ResultEmpty, wantErr: true, - wantErrMsg: "chart verification error: failed to verify : no matching signatures", + wantErrMsg: "chart verification error: failed to verify : no signatures found", assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no matching signatures"), - *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no matching signatures"), + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no signatures found"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no signatures found"), }, }, { name: "unsigned charts should not pass keyless verification", - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { obj.Spec.Chart = metadata.Name obj.Spec.Version = metadata.Version - obj.Spec.Verify = &helmv1.OCIRepositoryVerification{ + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ Provider: "cosign", } }, want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no matching signatures"), - *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no matching signatures"), + 
*conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartVerificationError", "chart verification error: failed to verify : no signatures found"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "chart verification error: failed to verify : no signatures found"), }, }, { name: "signed charts should pass verification", shouldSign: true, - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { obj.Spec.Chart = metadata.Name obj.Spec.Version = metadata.Version - obj.Spec.Verify = &helmv1.OCIRepositoryVerification{ + obj.Spec.Verify = &sourcev1.OCIRepositoryVerification{ Provider: "cosign", SecretRef: &meta.LocalObjectReference{Name: "cosign-key"}, } @@ -2846,12 +3353,12 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignature(t *testing.T }, { name: "verify failed before, removed from spec, remove condition", - beforeFunc: func(obj *helmv1.HelmChart) { + beforeFunc: func(obj *sourcev1.HelmChart) { obj.Spec.Chart = metadata.Name obj.Spec.Version = metadata.Version obj.Spec.Verify = nil conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") - obj.Status.Artifact = &sourcev1.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ @@ -2871,15 +3378,15 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignature(t *testing.T clientBuilder := fakeclient.NewClientBuilder() - repository := &helmv1.HelmRepository{ + repository := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "helmrepository-", }, - Spec: helmv1.HelmRepositorySpec{ + Spec: sourcev1.HelmRepositorySpec{ URL: fmt.Sprintf("oci://%s/testrepo", server.registryHost), Timeout: &metav1.Duration{Duration: timeout}, - Provider: helmv1.GenericOCIProvider, - Type: 
helmv1.HelmRepositoryTypeOCI, + Provider: sourcev1.GenericOCIProvider, + Type: sourcev1.HelmRepositoryTypeOCI, Insecure: true, }, } @@ -2898,18 +3405,18 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignature(t *testing.T Client: clientBuilder.Build(), EventRecorder: record.NewFakeRecorder(32), Getters: testGetters, - Storage: storage, + Storage: st, RegistryClientGenerator: registry.ClientGenerator, patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } - obj := &helmv1.HelmChart{ + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "helmchart-", }, - Spec: helmv1.HelmChartSpec{ - SourceRef: helmv1.LocalHelmChartSourceReference{ - Kind: helmv1.HelmRepositoryKind, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, Name: repository.Name, }, }, diff --git a/internal/controller/helmrepository_controller.go b/internal/controller/helmrepository_controller.go index d48b3c2f8..06c4494cf 100644 --- a/internal/controller/helmrepository_controller.go +++ b/internal/controller/helmrepository_controller.go @@ -32,15 +32,18 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/ratelimiter" + "sigs.k8s.io/controller-runtime/pkg/reconcile" eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" "github.com/fluxcd/pkg/runtime/conditions" helper 
"github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/jitter" @@ -49,9 +52,7 @@ import ( rreconcile "github.com/fluxcd/pkg/runtime/reconcile" sourcev1 "github.com/fluxcd/source-controller/api/v1" - helmv1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/internal/cache" - intdigest "github.com/fluxcd/source-controller/internal/digest" serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/repository" @@ -61,7 +62,7 @@ import ( ) // helmRepositoryReadyCondition contains the information required to summarize a -// v1beta2.HelmRepository Ready Condition. +// v1.HelmRepository Ready Condition. var helmRepositoryReadyCondition = summarize.Conditions{ Target: meta.ReadyCondition, Owned: []string{ @@ -102,14 +103,14 @@ var helmRepositoryFailConditions = []string{ // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/finalizers,verbs=get;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=events,verbs=create;patch -// HelmRepositoryReconciler reconciles a v1beta2.HelmRepository object. +// HelmRepositoryReconciler reconciles a v1.HelmRepository object. type HelmRepositoryReconciler struct { client.Client kuberecorder.EventRecorder helper.Metrics Getters helmgetter.Providers - Storage *Storage + Storage *storage.Storage ControllerName string Cache *cache.Cache @@ -120,14 +121,14 @@ type HelmRepositoryReconciler struct { } type HelmRepositoryReconcilerOptions struct { - RateLimiter ratelimiter.RateLimiter + RateLimiter workqueue.TypedRateLimiter[reconcile.Request] } // helmRepositoryReconcileFunc is the function type for all the -// v1beta2.HelmRepository (sub)reconcile functions. 
The type implementations +// v1.HelmRepository (sub)reconcile functions. The type implementations // are grouped and executed serially to perform the complete reconcile of the // object. -type helmRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) +type helmRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) func (r *HelmRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{}) @@ -137,7 +138,7 @@ func (r *HelmRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, r.patchOptions = getPatchOptions(helmRepositoryReadyCondition.Owned, r.ControllerName) return ctrl.NewControllerManagedBy(mgr). - For(&helmv1.HelmRepository{}). + For(&sourcev1.HelmRepository{}). WithEventFilter( predicate.And( intpredicates.HelmRepositoryOCIMigrationPredicate{}, @@ -155,7 +156,7 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque log := ctrl.LoggerFrom(ctx) // Fetch the HelmRepository - obj := &helmv1.HelmRepository{} + obj := &sourcev1.HelmRepository{} if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } @@ -164,7 +165,7 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque serialPatcher := patch.NewSerialPatcher(obj, r.Client) // If it's of type OCI, migrate the object to static. 
- if obj.Spec.Type == helmv1.HelmRepositoryTypeOCI { + if obj.Spec.Type == sourcev1.HelmRepositoryTypeOCI { return r.migrationToStatic(ctx, serialPatcher, obj) } @@ -191,9 +192,7 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque } result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) - // Always record suspend, readiness and duration metrics. - r.Metrics.RecordSuspend(ctx, obj, obj.Spec.Suspend) - r.Metrics.RecordReadiness(ctx, obj) + // Always record duration metrics. r.Metrics.RecordDuration(ctx, obj, start) }() @@ -234,7 +233,7 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque // object. It returns early on the first call that returns // reconcile.ResultRequeue, or produces an error. func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, - obj *helmv1.HelmRepository, reconcilers []helmRepositoryReconcileFunc) (sreconcile.Result, error) { + obj *sourcev1.HelmRepository, reconcilers []helmRepositoryReconcileFunc) (sreconcile.Result, error) { oldObj := obj.DeepCopy() rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress") @@ -259,7 +258,7 @@ func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seri } var chartRepo repository.ChartRepository - var artifact sourcev1.Artifact + var artifact meta.Artifact // Run the sub-reconcilers and build the result of reconciliation. var res sreconcile.Result @@ -287,7 +286,7 @@ func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seri } // notify emits notification related to the reconciliation. 
-func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *helmv1.HelmRepository, chartRepo *repository.ChartRepository, res sreconcile.Result, resErr error) { +func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.HelmRepository, chartRepo *repository.ChartRepository, res sreconcile.Result, resErr error) { // Notify successful reconciliation for new artifact and recovery from any // failure. if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { @@ -331,7 +330,7 @@ func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *h // The hostname of any URL in the Status of the object are updated, to ensure // they match the Storage server hostname of current runtime. func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, - obj *helmv1.HelmRepository, _ *sourcev1.Artifact, _ *repository.ChartRepository) (sreconcile.Result, error) { + obj *sourcev1.HelmRepository, _ *meta.Artifact, _ *repository.ChartRepository) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -369,7 +368,7 @@ func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, sp *pat if artifactMissing { msg += ": disappeared from storage" } - rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, msg) + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) @@ -386,15 +385,15 @@ func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, sp *pat } // reconcileSource attempts to fetch the Helm repository index using the -// specified configuration on the v1beta2.HelmRepository object. 
+// specified configuration on the v1.HelmRepository object. // -// When the fetch fails, it records v1beta2.FetchFailedCondition=True and +// When the fetch fails, it records v1.FetchFailedCondition=True and // returns early. // If successful and the index is valid, any previous -// v1beta2.FetchFailedCondition is removed, and the repository.ChartRepository +// v1.FetchFailedCondition is removed, and the repository.ChartRepository // pointer is set to the newly fetched index. func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, - obj *helmv1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { + obj *sourcev1.HelmRepository, artifact *meta.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { // Ensure it's not an OCI URL. API validation ensures that only // http/https/oci scheme are allowed. if strings.HasPrefix(obj.Spec.URL, helmreg.OCIScheme) { @@ -403,7 +402,7 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc fmt.Errorf("invalid Helm repository URL: %w", err), sourcev1.URLInvalidReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -413,7 +412,7 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc fmt.Errorf("invalid Helm repository URL: %w", err), sourcev1.URLInvalidReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -427,7 +426,7 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc err, sourcev1.AuthenticationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, 
sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } } @@ -441,14 +440,14 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc fmt.Errorf("invalid Helm repository URL: %w", err), sourcev1.URLInvalidReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e default: e := serror.NewStalling( fmt.Errorf("failed to construct Helm client: %w", err), meta.FailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } } @@ -459,7 +458,7 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc fmt.Errorf("failed to fetch Helm repository index: %w", err), meta.FailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) // Coin flip on transient or persistent error, return error and hope for the best return sreconcile.ResultEmpty, e } @@ -483,9 +482,9 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc if err := chartRepo.LoadFromPath(); err != nil { e := serror.NewGeneric( fmt.Errorf("failed to load Helm repository from index YAML: %w", err), - helmv1.IndexationFailedReason, + sourcev1.IndexationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } // Delete any stale failure observation @@ -496,16 +495,16 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc if revision.Validate() != nil { e := serror.NewGeneric( fmt.Errorf("failed to calculate revision: %w", err), - 
helmv1.IndexationFailedReason, + sourcev1.IndexationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } // Mark observations about the revision on the object. message := fmt.Sprintf("new index revision '%s'", revision) if obj.GetArtifact() != nil { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message) } rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { @@ -526,12 +525,12 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc // (Status) data on the object does not match the given. // // The inspection of the given data to the object is differed, ensuring any -// stale observations like v1beta2.ArtifactOutdatedCondition are removed. +// stale observations like v1.ArtifactOutdatedCondition are removed. // If the given Artifact does not differ from the object's current, it returns // early. // On a successful archive, the Artifact in the Status of the object is set, // and the symlink in the Storage is updated to its path. -func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { +func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { // Set the ArtifactInStorageCondition if there's no drift. 
defer func() { if obj.GetArtifact().HasRevision(artifact.Revision) { @@ -560,7 +559,7 @@ func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pa fmt.Errorf("failed to create artifact directory: %w", err), sourcev1.DirCreationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -581,7 +580,7 @@ func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pa fmt.Errorf("unable to get JSON index from chart repo: %w", err), sourcev1.ArchiveOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } if err = r.Storage.Copy(artifact, bytes.NewBuffer(b)); err != nil { @@ -589,7 +588,7 @@ func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pa fmt.Errorf("unable to save artifact to storage: %w", err), sourcev1.ArchiveOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -623,7 +622,7 @@ func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pa // reconcileDelete handles the deletion of the object. // It first garbage collects all Artifacts for the object from the Storage. // Removing the finalizer from the object if successful. 
-func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *helmv1.HelmRepository) (sreconcile.Result, error) { +func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmRepository) (sreconcile.Result, error) { // Garbage collect the resource's artifacts if err := r.garbageCollect(ctx, obj); err != nil { // Return the error so we retry the failed garbage collection @@ -651,8 +650,8 @@ func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *hel // - the deletion timestamp on the object is set // - the obj.Spec.Type has changed and artifacts are not supported by the new type // Which will result in the removal of all Artifacts for the objects. -func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *helmv1.HelmRepository) error { - if !obj.DeletionTimestamp.IsZero() || (obj.Spec.Type != "" && obj.Spec.Type != helmv1.HelmRepositoryTypeDefault) { +func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmRepository) error { + if !obj.DeletionTimestamp.IsZero() || (obj.Spec.Type != "" && obj.Spec.Type != sourcev1.HelmRepositoryTypeDefault) { if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { return serror.NewGeneric( fmt.Errorf("garbage collection for deleted resource failed: %w", err), @@ -679,7 +678,7 @@ func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *helm } if len(delFiles) > 0 { r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) + "garbage collected %d artifacts", len(delFiles)) return nil } } @@ -703,7 +702,7 @@ func (r *HelmRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Ob } // migrateToStatic is HelmRepository OCI migration to static object. 
-func (r *HelmRepositoryReconciler) migrationToStatic(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmRepository) (result ctrl.Result, err error) { +func (r *HelmRepositoryReconciler) migrationToStatic(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository) (result ctrl.Result, err error) { // Skip migration if suspended and not being deleted. if obj.Spec.Suspend && obj.DeletionTimestamp.IsZero() { return ctrl.Result{}, nil @@ -721,7 +720,7 @@ func (r *HelmRepositoryReconciler) migrationToStatic(ctx context.Context, sp *pa } // Delete finalizer and reset the status. controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) - obj.Status = helmv1.HelmRepositoryStatus{} + obj.Status = sourcev1.HelmRepositoryStatus{} if err := sp.Patch(ctx, obj); err != nil { return ctrl.Result{}, err diff --git a/internal/controller/helmrepository_controller_test.go b/internal/controller/helmrepository_controller_test.go index 0da154a15..d76c58a42 100644 --- a/internal/controller/helmrepository_controller_test.go +++ b/internal/controller/helmrepository_controller_test.go @@ -18,7 +18,6 @@ package controller import ( "context" - "crypto/tls" "encoding/json" "errors" "fmt" @@ -44,21 +43,20 @@ import ( kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" "github.com/fluxcd/pkg/helmtestserver" "github.com/fluxcd/pkg/runtime/conditions" conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/runtime/secrets" sourcev1 "github.com/fluxcd/source-controller/api/v1" - helmv1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/internal/cache" - intdigest 
"github.com/fluxcd/source-controller/internal/digest" - "github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/repository" intpredicates "github.com/fluxcd/source-controller/internal/predicates" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/internal/reconcile/summarize" - stls "github.com/fluxcd/source-controller/internal/tls" ) func TestHelmRepositoryReconciler_deleteBeforeFinalizer(t *testing.T) { @@ -73,10 +71,10 @@ func TestHelmRepositoryReconciler_deleteBeforeFinalizer(t *testing.T) { g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) }) - helmrepo := &helmv1.HelmRepository{} + helmrepo := &sourcev1.HelmRepository{} helmrepo.Name = "test-helmrepo" helmrepo.Namespace = namespaceName - helmrepo.Spec = helmv1.HelmRepositorySpec{ + helmrepo.Spec = sourcev1.HelmRepositorySpec{ Interval: metav1.Duration{Duration: interval}, URL: "https://example.com", } @@ -109,12 +107,12 @@ func TestHelmRepositoryReconciler_Reconcile(t *testing.T) { testServer.Start() defer testServer.Stop() - origObj := &helmv1.HelmRepository{ + origObj := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "helmrepository-reconcile-", Namespace: "default", }, - Spec: helmv1.HelmRepositorySpec{ + Spec: sourcev1.HelmRepositorySpec{ Interval: metav1.Duration{Duration: interval}, URL: testServer.URL(), }, @@ -175,20 +173,20 @@ func TestHelmRepositoryReconciler_Reconcile(t *testing.T) { func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *helmv1.HelmRepository, storage *Storage) error + beforeFunc func(obj *sourcev1.HelmRepository, storage *storage.Storage) error want sreconcile.Result wantErr bool - assertArtifact *sourcev1.Artifact + assertArtifact *meta.Artifact assertConditions 
[]metav1.Condition assertPaths []string }{ { name: "garbage collects", - beforeFunc: func(obj *helmv1.HelmRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { revisions := []string{"a", "b", "c", "d"} for n := range revisions { v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), Revision: v, } @@ -206,7 +204,7 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") return nil }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/d.txt", Revision: "d", Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", @@ -234,8 +232,8 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "notices missing artifact in storage", - beforeFunc: func(obj *helmv1.HelmRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/invalid.txt", Revision: "d", } @@ -253,10 +251,10 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "notices empty artifact digest", - beforeFunc: func(obj *helmv1.HelmRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { f := "empty-digest.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -284,10 +282,10 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "notices artifact digest mismatch", - beforeFunc: func(obj *helmv1.HelmRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.HelmRepository, storage 
*storage.Storage) error { f := "digest-mismatch.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -315,8 +313,8 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "updates hostname on diff from current", - beforeFunc: func(obj *helmv1.HelmRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -335,7 +333,7 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { assertPaths: []string{ "/reconcile-storage/hostname.txt", }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -354,14 +352,14 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { r := &HelmRepositoryReconciler{ Client: fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&helmv1.HelmRepository{}). + WithStatusSubresource(&sourcev1.HelmRepository{}). 
Build(), EventRecorder: record.NewFakeRecorder(32), Storage: testStorage, patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), } - obj := &helmv1.HelmRepository{ + obj := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-", Generation: 1, @@ -377,7 +375,7 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { }() var chartRepo repository.ChartRepository - var artifact sourcev1.Artifact + var artifact meta.Artifact sp := patch.NewSerialPatcher(obj, r.Client) got, err := r.reconcileStorage(context.TODO(), sp, obj, &artifact, &chartRepo) @@ -421,26 +419,29 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { server options url string secret *corev1.Secret - beforeFunc func(t *WithT, obj *helmv1.HelmRepository, rev digest.Digest) - afterFunc func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) + beforeFunc func(t *WithT, obj *sourcev1.HelmRepository) + revFunc func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest + afterFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) want sreconcile.Result wantErr bool assertConditions []metav1.Condition }{ { - name: "HTTPS with certSecretRef pointing to CA cert but public repo URL succeeds", + name: "HTTPS with certSecretRef non-matching CA succeeds via system CA pool", protocol: "http", url: "https://stefanprodan.github.io/podinfo", want: sreconcile.ResultSuccess, + wantErr: false, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "ca-file", + Name: "ca-file", + Namespace: "default", }, Data: map[string][]byte{ "ca.crt": tlsCA, }, }, - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.CertSecretRef = &meta.LocalObjectReference{Name: "ca-file"} }, assertConditions: []metav1.Condition{ @@ -458,21 
+459,43 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "ca-file", + Name: "ca-file", + Namespace: "default", }, Data: map[string][]byte{ "ca.crt": tlsCA, }, }, - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.CertSecretRef = &meta.LocalObjectReference{Name: "ca-file"} }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + tlsConfig, err := secrets.TLSConfigFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, tlsConfig, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).ToNot(BeNil()) t.Expect(artifact.Revision).ToNot(BeEmpty()) @@ -488,21 +511,43 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "ca-file", 
+ Name: "ca-file", + Namespace: "default", }, Data: map[string][]byte{ "caFile": tlsCA, }, }, - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + tlsConfig, err := secrets.TLSConfigFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, tlsConfig, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).ToNot(BeNil()) t.Expect(artifact.Revision).ToNot(BeEmpty()) @@ -519,22 +564,44 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "ca-file", + Name: "ca-file", + Namespace: "default", }, Data: map[string][]byte{ "caFile": tlsCA, }, Type: corev1.SecretTypeDockerConfigJson, }, - beforeFunc: func(t 
*WithT, obj *helmv1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + tlsConfig, err := secrets.TLSConfigFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, tlsConfig, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).ToNot(BeNil()) t.Expect(artifact.Revision).ToNot(BeEmpty()) @@ -543,12 +610,30 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { { name: "HTTP without secretRef makes ArtifactOutdated=True", protocol: "http", - want: sreconcile.ResultSuccess, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := 
[]helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).ToNot(BeNil()) t.Expect(artifact.Revision).ToNot(BeEmpty()) @@ -563,22 +648,45 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "basic-auth", + Name: "basic-auth", + Namespace: "default", }, Data: map[string][]byte{ "username": []byte("git"), "password": []byte("1234"), }, }, - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + basicAuth, err := secrets.BasicAuthFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + helmgetter.WithBasicAuth(basicAuth.Username, 
basicAuth.Password), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).ToNot(BeNil()) t.Expect(artifact.Revision).ToNot(BeEmpty()) @@ -594,7 +702,8 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "basic-auth", + Name: "basic-auth", + Namespace: "default", }, Data: map[string][]byte{ "username": []byte("git"), @@ -602,15 +711,37 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, Type: corev1.SecretTypeDockerConfigJson, }, - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + basicAuth, err := secrets.BasicAuthFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + 
helmgetter.WithURL(repoURL), + helmgetter.WithBasicAuth(basicAuth.Username, basicAuth.Password), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).ToNot(BeNil()) t.Expect(artifact.Revision).ToNot(BeEmpty()) @@ -626,24 +757,25 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "invalid-ca", + Name: "invalid-ca", + Namespace: "default", }, Data: map[string][]byte{ "ca.crt": []byte("invalid"), }, }, - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.CertSecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "cannot append certificate into certificate pool: invalid CA certificate"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, 
sourcev1.AuthenticationFailedReason, "failed to construct Helm client's TLS config: failed to parse CA certificate"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { // No repo index due to fetch fail. t.Expect(chartRepo.Path).To(BeEmpty()) t.Expect(chartRepo.Index).To(BeNil()) @@ -653,7 +785,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { { name: "Invalid URL makes FetchFailed=True and returns stalling error", protocol: "http", - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "") conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") @@ -665,7 +797,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { // No repo index due to fetch fail. 
t.Expect(chartRepo.Path).To(BeEmpty()) t.Expect(chartRepo.Index).To(BeNil()) @@ -675,7 +807,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { { name: "Unsupported scheme makes FetchFailed=True and returns stalling error", protocol: "http", - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "ftp://") conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") @@ -687,7 +819,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { // No repo index due to fetch fail. 
t.Expect(chartRepo.Path).To(BeEmpty()) t.Expect(chartRepo.Index).To(BeNil()) @@ -697,7 +829,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { { name: "Missing secret returns FetchFailed=True and returns error", protocol: "http", - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"} conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") @@ -708,7 +840,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { // No repo index due to fetch fail. 
t.Expect(chartRepo.Path).To(BeEmpty()) t.Expect(chartRepo.Index).To(BeNil()) @@ -720,24 +852,25 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { protocol: "http", secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "malformed-basic-auth", + Name: "malformed-basic-auth", + Namespace: "default", }, Data: map[string][]byte{ "username": []byte("git"), }, }, - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "malformed-basic-auth"} conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "required fields 'username' and 'password"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "secret 'default/malformed-basic-auth': malformed basic auth - has 'username' but missing 'password'"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { // No repo index due to fetch fail. 
t.Expect(chartRepo.Path).To(BeEmpty()) t.Expect(chartRepo.Index).To(BeNil()) @@ -747,20 +880,34 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { { name: "Stored index with same revision", protocol: "http", - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev digest.Digest) { - obj.Status.Artifact = &sourcev1.Artifact{ - Revision: rev.String(), - } - + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "foo", "bar") }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) 
+ t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).To(BeNil()) @@ -771,8 +918,8 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { { name: "Stored index with different revision", protocol: "http", - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev digest.Digest) { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{ Revision: "80bb3dd67c63095d985850459834ea727603727a370079de90d221191d375a86", } conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -784,7 +931,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).ToNot(BeNil()) @@ -796,8 +943,8 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { { name: "Existing artifact makes 
ArtifactOutdated=True", protocol: "http", - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev digest.Digest) { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{ Path: "some-path", Revision: "some-rev", } @@ -812,12 +959,13 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { } for _, tt := range tests { - obj := &helmv1.HelmRepository{ + obj := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "auth-strategy-", Generation: 1, + Namespace: "default", }, - Spec: helmv1.HelmRepositorySpec{ + Spec: sourcev1.HelmRepositorySpec{ Interval: metav1.Duration{Duration: interval}, Timeout: &metav1.Duration{Duration: timeout}, }, @@ -868,54 +1016,15 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&helmv1.HelmRepository{}) + WithStatusSubresource(&sourcev1.HelmRepository{}) if secret != nil { clientBuilder.WithObjects(secret.DeepCopy()) } - // Calculate the artifact digest for valid repos configurations. - getterOpts := []helmgetter.Option{ - helmgetter.WithURL(server.URL()), - } - var newChartRepo *repository.ChartRepository - var tlsConf *tls.Config - validSecret := true - if secret != nil { - // Extract the client options from secret, ignoring any invalid - // value. validSecret is used to determine if the index digest - // should be calculated below. - var gOpts []helmgetter.Option - var serr error - gOpts, serr = getter.GetterOptionsFromSecret(*secret) - if serr != nil { - validSecret = false - } - getterOpts = append(getterOpts, gOpts...) 
- repoURL := server.URL() - if tt.url != "" { - repoURL = tt.url - } - tlsConf, _, serr = stls.KubeTLSClientConfigFromSecret(*secret, repoURL) - if serr != nil { - validSecret = false - } - if tlsConf == nil { - tlsConf, _, serr = stls.TLSClientConfigFromSecret(*secret, repoURL) - if serr != nil { - validSecret = false - } - } - newChartRepo, err = repository.NewChartRepository(obj.Spec.URL, "", testGetters, tlsConf, getterOpts...) - } else { - newChartRepo, err = repository.NewChartRepository(obj.Spec.URL, "", testGetters, nil) - } - g.Expect(err).ToNot(HaveOccurred()) - var rev digest.Digest - if validSecret { - g.Expect(newChartRepo.CacheIndex()).To(Succeed()) - rev = newChartRepo.Digest(intdigest.Canonical) + if tt.revFunc != nil { + rev = tt.revFunc(g, server, secret) } r := &HelmRepositoryReconciler{ @@ -926,7 +1035,14 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), } if tt.beforeFunc != nil { - tt.beforeFunc(g, obj, rev) + tt.beforeFunc(g, obj) + } + + // Special handling for tests that need to set revision after calculation + if tt.name == "Stored index with same revision" && rev != "" { + obj.Status.Artifact = &meta.Artifact{ + Revision: rev.String(), + } } g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) @@ -935,7 +1051,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }() var chartRepo repository.ChartRepository - var artifact sourcev1.Artifact + var artifact meta.Artifact sp := patch.NewSerialPatcher(obj, r.Client) got, err := r.reconcileSource(context.TODO(), sp, obj, &artifact, &chartRepo) @@ -960,19 +1076,19 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { tests := []struct { name string cache *cache.Cache - beforeFunc func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) - afterFunc func(t *WithT, obj *helmv1.HelmRepository, cache *cache.Cache) 
+ beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) + afterFunc func(t *WithT, obj *sourcev1.HelmRepository, cache *cache.Cache) want sreconcile.Result wantErr bool assertConditions []metav1.Condition }{ { name: "Archiving artifact to storage makes ArtifactInStorage=True and artifact is stored as JSON", - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} }, want: sreconcile.ResultSuccess, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, cache *cache.Cache) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, cache *cache.Cache) { localPath := testStorage.LocalPath(*obj.GetArtifact()) b, err := os.ReadFile(localPath) t.Expect(err).To(Not(HaveOccurred())) @@ -985,7 +1101,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { { name: "Archiving (loaded) artifact to storage adds to cache", cache: cache.New(10, time.Minute), - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { index.Index = &repo.IndexFile{ APIVersion: "v1", Generated: time.Now(), @@ -993,7 +1109,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { obj.Spec.Interval = metav1.Duration{Duration: interval} }, want: sreconcile.ResultSuccess, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, cache *cache.Cache) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, cache *cache.Cache) { i, ok := cache.Get(obj.GetArtifact().Path) t.Expect(ok).To(BeTrue()) t.Expect(i).To(BeAssignableToTypeOf(&repo.IndexFile{})) @@ -1004,11 +1120,11 @@ func 
TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { }, { name: "Up-to-date artifact should not update status", - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} obj.Status.Artifact = artifact.DeepCopy() }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, _ *cache.Cache) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, _ *cache.Cache) { t.Expect(obj.Status.URL).To(BeEmpty()) }, want: sreconcile.ResultSuccess, @@ -1018,7 +1134,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { }, { name: "Removes ArtifactOutdatedCondition after creating a new artifact", - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") }, @@ -1029,10 +1145,10 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { }, { name: "Creates latest symlink to the created artifact", - beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} }, - afterFunc: func(t *WithT, obj *helmv1.HelmRepository, _ *cache.Cache) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, _ *cache.Cache) { localPath := testStorage.LocalPath(*obj.GetArtifact()) symlinkPath := filepath.Join(filepath.Dir(localPath), "index.yaml") targetFile, err := 
os.Readlink(symlinkPath) @@ -1053,7 +1169,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { r := &HelmRepositoryReconciler{ Client: fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&helmv1.HelmRepository{}). + WithStatusSubresource(&sourcev1.HelmRepository{}). Build(), EventRecorder: record.NewFakeRecorder(32), Storage: testStorage, @@ -1062,16 +1178,16 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), } - obj := &helmv1.HelmRepository{ + obj := &sourcev1.HelmRepository{ TypeMeta: metav1.TypeMeta{ - Kind: helmv1.HelmRepositoryKind, + Kind: sourcev1.HelmRepositoryKind, }, ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-bucket-", Generation: 1, Namespace: "default", }, - Spec: helmv1.HelmRepositorySpec{ + Spec: sourcev1.HelmRepositorySpec{ Timeout: &metav1.Duration{Duration: timeout}, URL: "https://example.com/index.yaml", }, @@ -1111,7 +1227,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) { // Helper to build simple helmRepositoryReconcileFunc with result and error. 
buildReconcileFuncs := func(r sreconcile.Result, e error) helmRepositoryReconcileFunc { - return func(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + return func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { return r, e } } @@ -1166,11 +1282,11 @@ func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) { { name: "multiple object status conditions mutations", reconcileFuncs: []helmRepositoryReconcileFunc{ - func(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") return sreconcile.ResultSuccess, nil }, - func(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { conditions.MarkTrue(obj, meta.ReconcilingCondition, meta.ProgressingReason, "creating artifact") return sreconcile.ResultSuccess, nil }, @@ -1220,16 +1336,16 @@ func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) { r := &HelmRepositoryReconciler{ Client: fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&helmv1.HelmRepository{}). + WithStatusSubresource(&sourcev1.HelmRepository{}). 
Build(), patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), } - obj := &helmv1.HelmRepository{ + obj := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-", Generation: tt.generation, }, - Status: helmv1.HelmRepositoryStatus{ + Status: sourcev1.HelmRepositoryStatus{ ObservedGeneration: tt.observedGeneration, }, } @@ -1254,13 +1370,13 @@ func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) { func TestHelmRepositoryReconciler_statusConditions(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *helmv1.HelmRepository) + beforeFunc func(obj *sourcev1.HelmRepository) assertConditions []metav1.Condition wantErr bool }{ { name: "positive conditions only", - beforeFunc: func(obj *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmRepository) { conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") }, assertConditions: []metav1.Condition{ @@ -1271,7 +1387,7 @@ func TestHelmRepositoryReconciler_statusConditions(t *testing.T) { }, { name: "multiple failures", - beforeFunc: func(obj *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmRepository) { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error") @@ -1286,7 +1402,7 @@ func TestHelmRepositoryReconciler_statusConditions(t *testing.T) { }, { name: "mixed positive and negative conditions", - beforeFunc: func(obj *helmv1.HelmRepository) { + beforeFunc: func(obj *sourcev1.HelmRepository) { conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, 
sourcev1.AuthenticationFailedReason, "failed to get secret") }, @@ -1303,10 +1419,10 @@ func TestHelmRepositoryReconciler_statusConditions(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - obj := &helmv1.HelmRepository{ + obj := &sourcev1.HelmRepository{ TypeMeta: metav1.TypeMeta{ - Kind: helmv1.HelmRepositoryKind, - APIVersion: helmv1.GroupVersion.String(), + Kind: sourcev1.HelmRepositoryKind, + APIVersion: sourcev1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: "helmrepo", @@ -1316,7 +1432,7 @@ func TestHelmRepositoryReconciler_statusConditions(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithObjects(obj). - WithStatusSubresource(&helmv1.HelmRepository{}) + WithStatusSubresource(&sourcev1.HelmRepository{}) c := clientBuilder.Build() @@ -1351,8 +1467,8 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) { name string res sreconcile.Result resErr error - oldObjBeforeFunc func(obj *helmv1.HelmRepository) - newObjBeforeFunc func(obj *helmv1.HelmRepository) + oldObjBeforeFunc func(obj *sourcev1.HelmRepository) + newObjBeforeFunc func(obj *sourcev1.HelmRepository) wantEvent string }{ { @@ -1364,8 +1480,8 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) { name: "new artifact with nil size", res: sreconcile.ResultSuccess, resErr: nil, - newObjBeforeFunc: func(obj *helmv1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: nil} + newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: nil} }, wantEvent: "Normal NewArtifact stored fetched index of unknown size", }, @@ -1373,8 +1489,8 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) { name: "new artifact", res: sreconcile.ResultSuccess, resErr: nil, - newObjBeforeFunc: func(obj *helmv1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + newObjBeforeFunc: func(obj 
*sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} }, wantEvent: "Normal NewArtifact stored fetched index of size", }, @@ -1382,13 +1498,13 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) { name: "recovery from failure", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *helmv1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, - newObjBeforeFunc: func(obj *helmv1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal Succeeded stored fetched index of size", @@ -1397,13 +1513,13 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) { name: "recovery and new artifact", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *helmv1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, - newObjBeforeFunc: func(obj *helmv1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Digest: "bbb", Size: &aSize} + 
newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb", Size: &aSize} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal NewArtifact stored fetched index of size", @@ -1412,12 +1528,12 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) { name: "no updates", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *helmv1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, - newObjBeforeFunc: func(obj *helmv1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, }, @@ -1428,7 +1544,7 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) { g := NewWithT(t) recorder := record.NewFakeRecorder(32) - oldObj := &helmv1.HelmRepository{} + oldObj := &sourcev1.HelmRepository{} newObj := oldObj.DeepCopy() if tt.oldObjBeforeFunc != nil { @@ -1475,12 +1591,12 @@ func TestHelmRepositoryReconciler_ReconcileTypeUpdatePredicateFilter(t *testing. testServer.Start() defer testServer.Stop() - obj := &helmv1.HelmRepository{ + obj := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "helmrepository-reconcile-", Namespace: "default", }, - Spec: helmv1.HelmRepositorySpec{ + Spec: sourcev1.HelmRepositorySpec{ Interval: metav1.Duration{Duration: interval}, URL: testServer.URL(), }, @@ -1524,7 +1640,7 @@ func TestHelmRepositoryReconciler_ReconcileTypeUpdatePredicateFilter(t *testing. 
g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) // Switch to a OCI helm repository type - obj.Spec.Type = helmv1.HelmRepositoryTypeOCI + obj.Spec.Type = sourcev1.HelmRepositoryTypeOCI obj.Spec.URL = fmt.Sprintf("oci://%s", testRegistryServer.registryHost) oldGen := obj.GetGeneration() @@ -1564,12 +1680,12 @@ func TestHelmRepositoryReconciler_ReconcileSpecUpdatePredicateFilter(t *testing. testServer.Start() defer testServer.Stop() - obj := &helmv1.HelmRepository{ + obj := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "helmrepository-reconcile-", Namespace: "default", }, - Spec: helmv1.HelmRepositorySpec{ + Spec: sourcev1.HelmRepositorySpec{ Interval: metav1.Duration{Duration: interval}, URL: testServer.URL(), }, @@ -1666,12 +1782,12 @@ func TestHelmRepositoryReconciler_InMemoryCaching(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - helmRepo := &helmv1.HelmRepository{ + helmRepo := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "helmrepository-", Namespace: ns.Name, }, - Spec: helmv1.HelmRepositorySpec{ + Spec: sourcev1.HelmRepositorySpec{ URL: testServer.URL(), }, } @@ -1725,7 +1841,7 @@ func TestHelmRepositoryReconciler_ociMigration(t *testing.T) { g.Expect(testEnv.Cleanup(ctx, testns)).ToNot(HaveOccurred()) }) - hr := &helmv1.HelmRepository{ + hr := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("hr-%s", randStringRunes(5)), Namespace: testns.Name, @@ -1736,8 +1852,8 @@ func TestHelmRepositoryReconciler_ociMigration(t *testing.T) { // Migrates newly created object with finalizer. 
hr.ObjectMeta.Finalizers = append(hr.ObjectMeta.Finalizers, "foo.bar", sourcev1.SourceFinalizer) - hr.Spec = helmv1.HelmRepositorySpec{ - Type: helmv1.HelmRepositoryTypeOCI, + hr.Spec = sourcev1.HelmRepositorySpec{ + Type: sourcev1.HelmRepositoryTypeOCI, URL: "oci://foo/bar", Interval: metav1.Duration{Duration: interval}, } diff --git a/internal/controller/ocirepository_controller.go b/internal/controller/ocirepository_controller.go index 9e6e69145..a91c8a51b 100644 --- a/internal/controller/ocirepository_controller.go +++ b/internal/controller/ocirepository_controller.go @@ -19,12 +19,15 @@ package controller import ( "context" cryptotls "crypto/tls" + "encoding/json" "errors" "fmt" "io" "net/http" + "net/url" "os" "path/filepath" + "regexp" "sort" "strings" "time" @@ -35,24 +38,21 @@ import ( "github.com/google/go-containerregistry/pkg/name" gcrv1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/notaryproject/notation-go/verifier/trustpolicy" "github.com/sigstore/cosign/v2/pkg/cosign" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" "k8s.io/utils/ptr" - - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/ratelimiter" + "sigs.k8s.io/controller-runtime/pkg/reconcile" eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + 
"github.com/fluxcd/pkg/cache" "github.com/fluxcd/pkg/oci" "github.com/fluxcd/pkg/runtime/conditions" helper "github.com/fluxcd/pkg/runtime/controller" @@ -60,22 +60,29 @@ import ( "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" rreconcile "github.com/fluxcd/pkg/runtime/reconcile" + "github.com/fluxcd/pkg/runtime/secrets" "github.com/fluxcd/pkg/sourceignore" "github.com/fluxcd/pkg/tar" "github.com/fluxcd/pkg/version" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/predicate" sourcev1 "github.com/fluxcd/source-controller/api/v1" - ociv1 "github.com/fluxcd/source-controller/api/v1beta2" serror "github.com/fluxcd/source-controller/internal/error" soci "github.com/fluxcd/source-controller/internal/oci" + scosign "github.com/fluxcd/source-controller/internal/oci/cosign" + "github.com/fluxcd/source-controller/internal/oci/notation" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/internal/reconcile/summarize" - "github.com/fluxcd/source-controller/internal/tls" "github.com/fluxcd/source-controller/internal/util" ) // ociRepositoryReadyCondition contains the information required to summarize a -// v1beta2.OCIRepository Ready Condition. +// v1.OCIRepository Ready Condition. 
var ociRepositoryReadyCondition = summarize.Conditions{ Target: meta.ReadyCondition, Owned: []string{ @@ -112,6 +119,8 @@ var ociRepositoryFailConditions = []string{ sourcev1.StorageOperationFailedCondition, } +type filterFunc func(tags []string) ([]string, error) + type invalidOCIURLError struct { err error } @@ -120,19 +129,20 @@ func (e invalidOCIURLError) Error() string { return e.err.Error() } -// ociRepositoryReconcileFunc is the function type for all the v1beta2.OCIRepository +// ociRepositoryReconcileFunc is the function type for all the v1.OCIRepository // (sub)reconcile functions. The type implementations are grouped and // executed serially to perform the complete reconcile of the object. -type ociRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *ociv1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) +type ociRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.OCIRepository, metadata *meta.Artifact, dir string) (sreconcile.Result, error) -// OCIRepositoryReconciler reconciles a v1beta2.OCIRepository object +// OCIRepositoryReconciler reconciles a v1.OCIRepository object type OCIRepositoryReconciler struct { client.Client helper.Metrics kuberecorder.EventRecorder - Storage *Storage + Storage *storage.Storage ControllerName string + TokenCache *cache.TokenCache requeueDependency time.Duration patchOptions []patch.Option @@ -140,7 +150,7 @@ type OCIRepositoryReconciler struct { type OCIRepositoryReconcilerOptions struct { DependencyRequeueInterval time.Duration - RateLimiter ratelimiter.RateLimiter + RateLimiter workqueue.TypedRateLimiter[reconcile.Request] } // SetupWithManager sets up the controller with the Manager. @@ -154,7 +164,7 @@ func (r *OCIRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, o r.requeueDependency = opts.DependencyRequeueInterval return ctrl.NewControllerManagedBy(mgr). 
- For(&ociv1.OCIRepository{}, builder.WithPredicates( + For(&sourcev1.OCIRepository{}, builder.WithPredicates( predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), )). WithOptions(controller.Options{ @@ -167,13 +177,14 @@ func (r *OCIRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, o // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories/finalizers,verbs=get;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=events,verbs=create;patch +// +kubebuilder:rbac:groups="",resources=serviceaccounts/token,verbs=create func (r *OCIRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := ctrl.LoggerFrom(ctx) // Fetch the OCIRepository - obj := &ociv1.OCIRepository{} + obj := &sourcev1.OCIRepository{} if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } @@ -205,9 +216,7 @@ func (r *OCIRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques } result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) - // Always record suspend, readiness and duration metrics. - r.Metrics.RecordSuspend(ctx, obj, obj.Spec.Suspend) - r.Metrics.RecordReadiness(ctx, obj) + // Always record duration metrics. r.Metrics.RecordDuration(ctx, obj, start) }() @@ -247,7 +256,7 @@ func (r *OCIRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques // reconcile iterates through the ociRepositoryReconcileFunc tasks for the // object. It returns early on the first call that returns // reconcile.ResultRequeue, or produces an error. 
-func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *ociv1.OCIRepository, reconcilers []ociRepositoryReconcileFunc) (sreconcile.Result, error) { +func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.OCIRepository, reconcilers []ociRepositoryReconcileFunc) (sreconcile.Result, error) { oldObj := obj.DeepCopy() rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress") @@ -279,7 +288,7 @@ func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seria fmt.Errorf("failed to create temporary working directory: %w", err), sourcev1.DirCreationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } defer func() { @@ -292,7 +301,7 @@ func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seria var ( res sreconcile.Result resErr error - metadata = sourcev1.Artifact{} + metadata = meta.Artifact{} ) // Run the sub-reconcilers and build the result of reconciliation. @@ -319,10 +328,10 @@ func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seria } // reconcileSource fetches the upstream OCI artifact metadata and content. -// If this fails, it records v1beta2.FetchFailedCondition=True on the object and returns early. +// If this fails, it records v1.FetchFailedCondition=True on the object and returns early. 
func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, - obj *ociv1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) { - var auth authn.Authenticator + obj *sourcev1.OCIRepository, metadata *meta.Artifact, dir string) (sreconcile.Result, error) { + var authenticator authn.Authenticator ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() @@ -342,35 +351,80 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch fmt.Errorf("failed to get credential: %w", err), sourcev1.AuthenticationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - if _, ok := keychain.(soci.Anonymous); obj.Spec.Provider != ociv1.GenericOCIProvider && ok { + var proxyURL *url.URL + if obj.Spec.ProxySecretRef != nil { + var err error + proxyURL, err = secrets.ProxyURLFromSecretRef(ctx, r.Client, types.NamespacedName{ + Name: obj.Spec.ProxySecretRef.Name, + Namespace: obj.GetNamespace(), + }) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to get proxy address: %w", err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + } + + if _, ok := keychain.(soci.Anonymous); obj.Spec.Provider != "" && obj.Spec.Provider != sourcev1.GenericOCIProvider && ok { + opts := []auth.Option{ + auth.WithClient(r.Client), + auth.WithServiceAccountNamespace(obj.GetNamespace()), + } + + if obj.Spec.ServiceAccountName != "" { + // Check object-level workload identity feature gate. 
+ if !auth.IsObjectLevelWorkloadIdentityEnabled() { + const gate = auth.FeatureGateObjectLevelWorkloadIdentity + const msgFmt = "to use spec.serviceAccountName for provider authentication please enable the %s feature gate in the controller" + err := fmt.Errorf(msgFmt, gate) + return sreconcile.ResultEmpty, serror.NewStalling(err, meta.FeatureGateDisabledReason) + } + // Set ServiceAccountName only if explicitly specified + opts = append(opts, auth.WithServiceAccountName(obj.Spec.ServiceAccountName)) + } + if r.TokenCache != nil { + involvedObject := cache.InvolvedObject{ + Kind: sourcev1.OCIRepositoryKind, + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + Operation: cache.OperationReconcile, + } + opts = append(opts, auth.WithCache(*r.TokenCache, involvedObject)) + } + if proxyURL != nil { + opts = append(opts, auth.WithProxyURL(*proxyURL)) + } var authErr error - auth, authErr = soci.OIDCAuth(ctxTimeout, obj.Spec.URL, obj.Spec.Provider) - if authErr != nil && !errors.Is(authErr, oci.ErrUnconfiguredProvider) { + authenticator, authErr = soci.OIDCAuth(ctxTimeout, obj.Spec.URL, obj.Spec.Provider, opts...) 
+ if authErr != nil { e := serror.NewGeneric( fmt.Errorf("failed to get credential from %s: %w", obj.Spec.Provider, authErr), sourcev1.AuthenticationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } } // Generate the transport for remote operations - transport, err := r.transport(ctx, obj) + transport, err := r.transport(ctx, obj, proxyURL) if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to generate transport for '%s': %w", obj.Spec.URL, err), sourcev1.AuthenticationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - opts := makeRemoteOptions(ctx, transport, keychain, auth) + opts := makeRemoteOptions(ctx, transport, keychain, authenticator) // Determine which artifact revision to pull ref, err := r.getArtifactRef(obj, opts) @@ -379,14 +433,14 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch e := serror.NewStalling( fmt.Errorf("URL validation failed for '%s': %w", obj.Spec.URL, err), sourcev1.URLInvalidReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } e := serror.NewGeneric( fmt.Errorf("failed to determine the artifact tag for '%s': %w", obj.Spec.URL, err), sourcev1.ReadOperationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -396,12 +450,12 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to 
determine artifact digest: %w", err), - ociv1.OCIPullFailedReason, + sourcev1.OCIPullFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - metaArtifact := &sourcev1.Artifact{Revision: revision} + metaArtifact := &meta.Artifact{Revision: revision} metaArtifact.DeepCopyInto(metadata) // Mark observations about the revision on the object @@ -409,7 +463,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch if !obj.GetArtifact().HasRevision(revision) { message := fmt.Sprintf("new revision '%s' for '%s'", revision, ref) if obj.GetArtifact() != nil { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message) } rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { @@ -430,21 +484,23 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch conditions.GetObservedGeneration(obj, sourcev1.SourceVerifiedCondition) != obj.Generation || conditions.IsFalse(obj, sourcev1.SourceVerifiedCondition) { - err := r.verifySignature(ctx, obj, ref, opts...) + result, err := r.verifySignature(ctx, obj, ref, keychain, authenticator, transport, opts...) 
if err != nil { provider := obj.Spec.Verify.Provider - if obj.Spec.Verify.SecretRef == nil { + if obj.Spec.Verify.SecretRef == nil && obj.Spec.Verify.Provider == "cosign" { provider = fmt.Sprintf("%s keyless", provider) } e := serror.NewGeneric( fmt.Errorf("failed to verify the signature using provider '%s': %w", provider, err), sourcev1.VerificationError, ) - conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, e.Err.Error()) + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision %s", revision) + if result == soci.VerificationResultSuccess { + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision %s", revision) + } } // Skip pulling if the artifact revision and the source configuration has @@ -459,9 +515,9 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to pull artifact from '%s': %w", obj.Spec.URL, err), - ociv1.OCIPullFailedReason, + sourcev1.OCIPullFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -470,9 +526,9 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to parse artifact manifest: %w", err), - ociv1.OCILayerOperationFailedReason, + sourcev1.OCILayerOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } metadata.Metadata = manifest.Annotations @@ -480,31 +536,31 @@ func (r *OCIRepositoryReconciler) 
reconcileSource(ctx context.Context, sp *patch // Extract the compressed content from the selected layer blob, err := r.selectLayer(obj, img) if err != nil { - e := serror.NewGeneric(err, ociv1.OCILayerOperationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + e := serror.NewGeneric(err, sourcev1.OCILayerOperationFailedReason) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } // Persist layer content to storage using the specified operation switch obj.GetLayerOperation() { - case ociv1.OCILayerExtract: + case sourcev1.OCILayerExtract: if err = tar.Untar(blob, dir, tar.WithMaxUntarSize(-1), tar.WithSkipSymlinks()); err != nil { e := serror.NewGeneric( fmt.Errorf("failed to extract layer contents from artifact: %w", err), - ociv1.OCILayerOperationFailedReason, + sourcev1.OCILayerOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - case ociv1.OCILayerCopy: + case sourcev1.OCILayerCopy: metadata.Path = fmt.Sprintf("%s.tgz", r.digestFromRevision(metadata.Revision)) file, err := os.Create(filepath.Join(dir, metadata.Path)) if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to create file to copy layer to: %w", err), - ociv1.OCILayerOperationFailedReason, + sourcev1.OCILayerOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } defer file.Close() @@ -513,17 +569,17 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to copy layer from artifact: %w", err), - ociv1.OCILayerOperationFailedReason, + sourcev1.OCILayerOperationFailedReason, ) - 
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } default: e := serror.NewGeneric( fmt.Errorf("unsupported layer operation: %s", obj.GetLayerOperation()), - ociv1.OCILayerOperationFailedReason, + sourcev1.OCILayerOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -533,7 +589,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch // selectLayer finds the matching layer and returns its compressed contents. // If no layer selector was provided, we pick the first layer from the OCI artifact. -func (r *OCIRepositoryReconciler) selectLayer(obj *ociv1.OCIRepository, image gcrv1.Image) (io.ReadCloser, error) { +func (r *OCIRepositoryReconciler) selectLayer(obj *sourcev1.OCIRepository, image gcrv1.Image) (io.ReadCloser, error) { layers, err := image.Layers() if err != nil { return nil, fmt.Errorf("failed to parse artifact layers: %w", err) @@ -609,57 +665,64 @@ func (r *OCIRepositoryReconciler) digestFromRevision(revision string) string { } // verifySignature verifies the authenticity of the given image reference URL. +// It supports two different verification providers: cosign and notation. // First, it tries to use a key if a Secret with a valid public key is provided. -// If not, it falls back to a keyless approach for verification. -func (r *OCIRepositoryReconciler) verifySignature(ctx context.Context, obj *ociv1.OCIRepository, ref name.Reference, opt ...remote.Option) error { +// If not, when using cosign it falls back to a keyless approach for verification. +// When notation is used, a trust policy is required to verify the image. +// The verification result is returned as a VerificationResult and any error encountered. 
+func (r *OCIRepositoryReconciler) verifySignature(ctx context.Context, obj *sourcev1.OCIRepository, + ref name.Reference, keychain authn.Keychain, auth authn.Authenticator, + transport *http.Transport, opt ...remote.Option) (soci.VerificationResult, error) { + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() provider := obj.Spec.Verify.Provider switch provider { case "cosign": - defaultCosignOciOpts := []soci.Options{ - soci.WithRemoteOptions(opt...), + defaultCosignOciOpts := []scosign.Options{ + scosign.WithRemoteOptions(opt...), } // get the public keys from the given secret if secretRef := obj.Spec.Verify.SecretRef; secretRef != nil { - certSecretName := types.NamespacedName{ + + verifySecret := types.NamespacedName{ Namespace: obj.Namespace, Name: secretRef.Name, } - var pubSecret corev1.Secret - if err := r.Get(ctxTimeout, certSecretName, &pubSecret); err != nil { - return err + pubSecret, err := r.retrieveSecret(ctxTimeout, verifySecret) + if err != nil { + return soci.VerificationResultFailed, err } - signatureVerified := false + signatureVerified := soci.VerificationResultFailed for k, data := range pubSecret.Data { // search for public keys in the secret if strings.HasSuffix(k, ".pub") { - verifier, err := soci.NewCosignVerifier(ctxTimeout, append(defaultCosignOciOpts, soci.WithPublicKey(data))...) + verifier, err := scosign.NewCosignVerifier(ctxTimeout, append(defaultCosignOciOpts, scosign.WithPublicKey(data))...) 
if err != nil { - return err + return soci.VerificationResultFailed, err } - signatures, _, err := verifier.VerifyImageSignatures(ctxTimeout, ref) - if err != nil { + result, err := verifier.Verify(ctxTimeout, ref) + if err != nil || result == soci.VerificationResultFailed { continue } - if signatures != nil { - signatureVerified = true + if result == soci.VerificationResultSuccess { + signatureVerified = result break } } } - if !signatureVerified { - return fmt.Errorf("no matching signatures were found for '%s'", ref) + if signatureVerified == soci.VerificationResultFailed { + return soci.VerificationResultFailed, fmt.Errorf("no matching signatures were found for '%s'", ref) } - return nil + return soci.VerificationResultSuccess, nil } // if no secret is provided, try keyless verification @@ -672,35 +735,115 @@ func (r *OCIRepositoryReconciler) verifySignature(ctx context.Context, obj *ociv SubjectRegExp: match.Subject, }) } - defaultCosignOciOpts = append(defaultCosignOciOpts, soci.WithIdentities(identities)) + defaultCosignOciOpts = append(defaultCosignOciOpts, scosign.WithIdentities(identities)) - verifier, err := soci.NewCosignVerifier(ctxTimeout, defaultCosignOciOpts...) + verifier, err := scosign.NewCosignVerifier(ctxTimeout, defaultCosignOciOpts...) 
if err != nil { - return err + return soci.VerificationResultFailed, err } - signatures, _, err := verifier.VerifyImageSignatures(ctxTimeout, ref) + result, err := verifier.Verify(ctxTimeout, ref) if err != nil { - return err + return soci.VerificationResultFailed, err } - if len(signatures) > 0 { - return nil + if result == soci.VerificationResultFailed { + return soci.VerificationResultFailed, fmt.Errorf("no matching signatures were found for '%s'", ref) + } + + return soci.VerificationResultSuccess, nil + + case "notation": + // get the public keys from the given secret + secretRef := obj.Spec.Verify.SecretRef + + if secretRef == nil { + return soci.VerificationResultFailed, fmt.Errorf("verification secret cannot be empty: '%s'", ref) + } + + verifySecret := types.NamespacedName{ + Namespace: obj.Namespace, + Name: secretRef.Name, + } + + pubSecret, err := r.retrieveSecret(ctxTimeout, verifySecret) + if err != nil { + return soci.VerificationResultFailed, err + } + + data, ok := pubSecret.Data[notation.DefaultTrustPolicyKey] + if !ok { + return soci.VerificationResultFailed, fmt.Errorf("'%s' not found in secret '%s'", notation.DefaultTrustPolicyKey, verifySecret.String()) + } + + var doc trustpolicy.Document + + if err := json.Unmarshal(data, &doc); err != nil { + return soci.VerificationResultFailed, fmt.Errorf("error occurred while parsing %s: %w", notation.DefaultTrustPolicyKey, err) + } + + var certs [][]byte + + for k, data := range pubSecret.Data { + if strings.HasSuffix(k, ".crt") || strings.HasSuffix(k, ".pem") { + certs = append(certs, data) + } + } + + if certs == nil { + return soci.VerificationResultFailed, fmt.Errorf("no certificates found in secret '%s'", verifySecret.String()) + } + + trustPolicy := notation.CleanTrustPolicy(&doc, ctrl.LoggerFrom(ctx)) + defaultNotationOciOpts := []notation.Options{ + notation.WithTrustPolicy(trustPolicy), + notation.WithRemoteOptions(opt...), + notation.WithAuth(auth), + notation.WithKeychain(keychain), + 
notation.WithInsecureRegistry(obj.Spec.Insecure), + notation.WithLogger(ctrl.LoggerFrom(ctx)), + notation.WithRootCertificates(certs), + notation.WithTransport(transport), + } + + verifier, err := notation.NewNotationVerifier(defaultNotationOciOpts...) + if err != nil { + return soci.VerificationResultFailed, err + } + + result, err := verifier.Verify(ctxTimeout, ref) + if err != nil { + return result, err } - return fmt.Errorf("no matching signatures were found for '%s'", ref) + if result == soci.VerificationResultFailed { + return soci.VerificationResultFailed, fmt.Errorf("no matching signatures were found for '%s'", ref) + } + + return result, nil + default: + return soci.VerificationResultFailed, fmt.Errorf("unsupported verification provider: %s", obj.Spec.Verify.Provider) } +} - return nil +// retrieveSecret retrieves a secret from the specified namespace with the given secret name. +// It returns the retrieved secret and any error encountered during the retrieval process. +func (r *OCIRepositoryReconciler) retrieveSecret(ctx context.Context, verifySecret types.NamespacedName) (corev1.Secret, error) { + var pubSecret corev1.Secret + + if err := r.Get(ctx, verifySecret, &pubSecret); err != nil { + return corev1.Secret{}, err + } + return pubSecret, nil } // parseRepository validates and extracts the repository URL. 
-func (r *OCIRepositoryReconciler) parseRepository(obj *ociv1.OCIRepository) (name.Repository, error) { - if !strings.HasPrefix(obj.Spec.URL, ociv1.OCIRepositoryPrefix) { +func (r *OCIRepositoryReconciler) parseRepository(obj *sourcev1.OCIRepository) (name.Repository, error) { + if !strings.HasPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix) { return name.Repository{}, fmt.Errorf("URL must be in format 'oci:////'") } - url := strings.TrimPrefix(obj.Spec.URL, ociv1.OCIRepositoryPrefix) + url := strings.TrimPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix) options := []name.Option{} if obj.Spec.Insecure { @@ -720,7 +863,7 @@ func (r *OCIRepositoryReconciler) parseRepository(obj *ociv1.OCIRepository) (nam } // getArtifactRef determines which tag or revision should be used and returns the OCI artifact FQN. -func (r *OCIRepositoryReconciler) getArtifactRef(obj *ociv1.OCIRepository, options []remote.Option) (name.Reference, error) { +func (r *OCIRepositoryReconciler) getArtifactRef(obj *sourcev1.OCIRepository, options []remote.Option) (name.Reference, error) { repo, err := r.parseRepository(obj) if err != nil { return nil, invalidOCIURLError{err} @@ -732,7 +875,7 @@ func (r *OCIRepositoryReconciler) getArtifactRef(obj *ociv1.OCIRepository, optio } if obj.Spec.Reference.SemVer != "" { - return r.getTagBySemver(repo, obj.Spec.Reference.SemVer, options) + return r.getTagBySemver(repo, obj.Spec.Reference.SemVer, filterTags(obj.Spec.Reference.SemverFilter), options) } if obj.Spec.Reference.Tag != "" { @@ -745,19 +888,24 @@ func (r *OCIRepositoryReconciler) getArtifactRef(obj *ociv1.OCIRepository, optio // getTagBySemver call the remote container registry, fetches all the tags from the repository, // and returns the latest tag according to the semver expression. 
-func (r *OCIRepositoryReconciler) getTagBySemver(repo name.Repository, exp string, options []remote.Option) (name.Reference, error) { +func (r *OCIRepositoryReconciler) getTagBySemver(repo name.Repository, exp string, filter filterFunc, options []remote.Option) (name.Reference, error) { tags, err := remote.List(repo, options...) if err != nil { return nil, err } + validTags, err := filter(tags) + if err != nil { + return nil, err + } + constraint, err := semver.NewConstraint(exp) if err != nil { return nil, fmt.Errorf("semver '%s' parse error: %w", exp, err) } var matchingVersions []*semver.Version - for _, t := range tags { + for _, t := range validTags { v, err := version.ParseVersion(t) if err != nil { continue @@ -779,88 +927,88 @@ func (r *OCIRepositoryReconciler) getTagBySemver(repo name.Repository, exp strin // keychain generates the credential keychain based on the resource // configuration. If no auth is specified a default keychain with // anonymous access is returned -func (r *OCIRepositoryReconciler) keychain(ctx context.Context, obj *ociv1.OCIRepository) (authn.Keychain, error) { - pullSecretNames := sets.NewString() +func (r *OCIRepositoryReconciler) keychain(ctx context.Context, obj *sourcev1.OCIRepository) (authn.Keychain, error) { + var imagePullSecrets []corev1.Secret // lookup auth secret if obj.Spec.SecretRef != nil { - pullSecretNames.Insert(obj.Spec.SecretRef.Name) + var imagePullSecret corev1.Secret + secretRef := types.NamespacedName{Namespace: obj.Namespace, Name: obj.Spec.SecretRef.Name} + err := r.Get(ctx, secretRef, &imagePullSecret) + if err != nil { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.AuthenticationFailedReason, + "auth secret '%s' not found", obj.Spec.SecretRef.Name) + return nil, fmt.Errorf("failed to get secret '%s': %w", secretRef, err) + } + imagePullSecrets = append(imagePullSecrets, imagePullSecret) } // lookup service account if obj.Spec.ServiceAccountName != "" { - serviceAccountName := 
obj.Spec.ServiceAccountName - serviceAccount := corev1.ServiceAccount{} - err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: serviceAccountName}, &serviceAccount) + saRef := types.NamespacedName{Namespace: obj.Namespace, Name: obj.Spec.ServiceAccountName} + saSecrets, err := secrets.PullSecretsFromServiceAccountRef(ctx, r.Client, saRef) if err != nil { return nil, err } - for _, ips := range serviceAccount.ImagePullSecrets { - pullSecretNames.Insert(ips.Name) - } + imagePullSecrets = append(imagePullSecrets, saSecrets...) } // if no pullsecrets available return an AnonymousKeychain - if len(pullSecretNames) == 0 { + if len(imagePullSecrets) == 0 { return soci.Anonymous{}, nil } - // lookup image pull secrets - imagePullSecrets := make([]corev1.Secret, len(pullSecretNames)) - for i, imagePullSecretName := range pullSecretNames.List() { - imagePullSecret := corev1.Secret{} - err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: imagePullSecretName}, &imagePullSecret) - if err != nil { - r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.AuthenticationFailedReason, - "auth secret '%s' not found", imagePullSecretName) - return nil, err - } - imagePullSecrets[i] = imagePullSecret - } - return k8schain.NewFromPullSecrets(ctx, imagePullSecrets) } // transport clones the default transport from remote and when a certSecretRef is specified, // the returned transport will include the TLS client and/or CA certificates. -func (r *OCIRepositoryReconciler) transport(ctx context.Context, obj *ociv1.OCIRepository) (*http.Transport, error) { +// If the insecure flag is set, the transport will skip the verification of the server's certificate. +// Additionally, if a proxy is specified, transport will use it. 
+func (r *OCIRepositoryReconciler) transport(ctx context.Context, obj *sourcev1.OCIRepository, proxyURL *url.URL) (*http.Transport, error) { transport := remote.DefaultTransport.(*http.Transport).Clone() + tlsConfig, err := r.getTLSConfig(ctx, obj) + if err != nil { + return nil, err + } + if tlsConfig != nil { + transport.TLSClientConfig = tlsConfig + } + + if proxyURL != nil { + transport.Proxy = http.ProxyURL(proxyURL) + } + + return transport, nil +} + +// getTLSConfig gets the TLS configuration for the transport based on the +// specified secret reference in the OCIRepository object, or the insecure flag. +func (r *OCIRepositoryReconciler) getTLSConfig(ctx context.Context, obj *sourcev1.OCIRepository) (*cryptotls.Config, error) { if obj.Spec.CertSecretRef == nil || obj.Spec.CertSecretRef.Name == "" { if obj.Spec.Insecure { - transport.TLSClientConfig = &cryptotls.Config{ + // NOTE: This is the only place in Flux where InsecureSkipVerify is allowed. + // This exception is made for OCIRepository to maintain backward compatibility + // with tools like crane that require insecure connections without certificates. + // This only applies when no CertSecretRef is provided AND insecure is explicitly set. + // All other controllers must NOT allow InsecureSkipVerify per our security policy. + return &cryptotls.Config{ InsecureSkipVerify: true, - } + }, nil } - return transport, nil + return nil, nil } - certSecretName := types.NamespacedName{ + secretName := types.NamespacedName{ Namespace: obj.Namespace, Name: obj.Spec.CertSecretRef.Name, } - var certSecret corev1.Secret - if err := r.Get(ctx, certSecretName, &certSecret); err != nil { - return nil, err - } - - tlsConfig, _, err := tls.KubeTLSClientConfigFromSecret(certSecret, "") - if err != nil { - return nil, err - } - if tlsConfig == nil { - tlsConfig, _, err = tls.TLSClientConfigFromSecret(certSecret, "") - if err != nil { - return nil, err - } - if tlsConfig != nil { - ctrl.LoggerFrom(ctx). 
- Info("warning: specifying TLS auth data via `certFile`/`keyFile`/`caFile` is deprecated, please use `tls.crt`/`tls.key`/`ca.crt` instead") - } - } - transport.TLSClientConfig = tlsConfig - - return transport, nil + // NOTE: Use WithSystemCertPool to maintain backward compatibility with the existing + // extend approach (system CAs + user CA) rather than the default replace approach (user CA only). + // This ensures source-controller continues to work with both system and user-provided CA certificates. + var tlsOpts = []secrets.TLSConfigOption{secrets.WithSystemCertPool()} + return secrets.TLSConfigFromSecretRef(ctx, r.Client, secretName, tlsOpts...) } // reconcileStorage ensures the current state of the storage matches the @@ -876,7 +1024,7 @@ func (r *OCIRepositoryReconciler) transport(ctx context.Context, obj *ociv1.OCIR // The hostname of any URL in the Status of the object are updated, to ensure // they match the Storage server hostname of current runtime. func (r *OCIRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, - obj *ociv1.OCIRepository, _ *sourcev1.Artifact, _ string) (sreconcile.Result, error) { + obj *sourcev1.OCIRepository, _ *meta.Artifact, _ string) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -914,7 +1062,7 @@ func (r *OCIRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patc if artifactMissing { msg += ": disappeared from storage" } - rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, msg) + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) @@ -933,13 +1081,13 @@ func (r *OCIRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patc // (Status) data on the 
object does not match the given. // // The inspection of the given data to the object is differed, ensuring any -// stale observations like v1beta2.ArtifactOutdatedCondition are removed. +// stale observations like v1.ArtifactOutdatedCondition are removed. // If the given Artifact does not differ from the object's current, it returns // early. // On a successful archive, the Artifact in the Status of the object is set, // and the symlink in the Storage is updated to its path. func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, - obj *ociv1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) { + obj *sourcev1.OCIRepository, metadata *meta.Artifact, dir string) (sreconcile.Result, error) { // Create artifact artifact := r.Storage.NewArtifactFor(obj.Kind, obj, metadata.Revision, fmt.Sprintf("%s.tar.gz", r.digestFromRevision(metadata.Revision))) @@ -966,14 +1114,14 @@ func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat fmt.Errorf("failed to stat source path: %w", err), sourcev1.StatOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } else if !f.IsDir() { e := serror.NewGeneric( fmt.Errorf("source path '%s' is not a directory", dir), sourcev1.InvalidPathReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -983,7 +1131,7 @@ func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat fmt.Errorf("failed to create artifact directory: %w", err), sourcev1.DirCreationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + 
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } unlock, err := r.Storage.Lock(artifact) @@ -996,13 +1144,13 @@ func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat defer unlock() switch obj.GetLayerOperation() { - case ociv1.OCILayerCopy: + case sourcev1.OCILayerCopy: if err = r.Storage.CopyFromPath(&artifact, filepath.Join(dir, metadata.Path)); err != nil { e := serror.NewGeneric( fmt.Errorf("unable to copy artifact to storage: %w", err), sourcev1.ArchiveOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } default: @@ -1019,12 +1167,12 @@ func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), ignoreDomain)...) } - if err := r.Storage.Archive(&artifact, dir, SourceIgnoreFilter(ps, ignoreDomain)); err != nil { + if err := r.Storage.Archive(&artifact, dir, storage.SourceIgnoreFilter(ps, ignoreDomain)); err != nil { e := serror.NewGeneric( fmt.Errorf("unable to archive artifact to storage: %s", err), sourcev1.ArchiveOperationFailedReason, ) - conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error()) + conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } } @@ -1032,7 +1180,6 @@ func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat // Record the observations on the object. obj.Status.Artifact = artifact.DeepCopy() obj.Status.Artifact.Metadata = metadata.Metadata - obj.Status.ContentConfigChecksum = "" // To be removed in the next API version. 
obj.Status.ObservedIgnore = obj.Spec.Ignore obj.Status.ObservedLayerSelector = obj.Spec.LayerSelector @@ -1052,7 +1199,7 @@ func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat // reconcileDelete handles the deletion of the object. // It first garbage collects all Artifacts for the object from the Storage. // Removing the finalizer from the object if successful. -func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *ociv1.OCIRepository) (sreconcile.Result, error) { +func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.OCIRepository) (sreconcile.Result, error) { // Garbage collect the resource's artifacts if err := r.garbageCollect(ctx, obj); err != nil { // Return the error so we retry the failed garbage collection @@ -1062,6 +1209,10 @@ func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *ociv // Remove our finalizer from the list controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + // Cleanup caches. + r.TokenCache.DeleteEventsForObject(sourcev1.OCIRepositoryKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile) + // Stop reconciliation as the object is being deleted return sreconcile.ResultEmpty, nil } @@ -1071,7 +1222,7 @@ func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *ociv // It removes all but the current Artifact from the Storage, unless the // deletion timestamp on the object is set. Which will result in the // removal of all Artifacts for the objects. 
-func (r *OCIRepositoryReconciler) garbageCollect(ctx context.Context, obj *ociv1.OCIRepository) error { +func (r *OCIRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.OCIRepository) error { if !obj.DeletionTimestamp.IsZero() { if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { return serror.NewGeneric( @@ -1095,7 +1246,7 @@ func (r *OCIRepositoryReconciler) garbageCollect(ctx context.Context, obj *ociv1 } if len(delFiles) > 0 { r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) + "garbage collected %d artifacts", len(delFiles)) return nil } } @@ -1119,7 +1270,7 @@ func (r *OCIRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Obj } // notify emits notification related to the reconciliation. -func (r *OCIRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *ociv1.OCIRepository, res sreconcile.Result, resErr error) { +func (r *OCIRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.OCIRepository, res sreconcile.Result, resErr error) { // Notify successful reconciliation for new artifact and recovery from any // failure. if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { @@ -1185,7 +1336,7 @@ type remoteOptions []remote.Option // ociContentConfigChanged evaluates the current spec with the observations // of the artifact in the status to determine if artifact content configuration // has changed and requires rebuilding the artifact. -func ociContentConfigChanged(obj *ociv1.OCIRepository) bool { +func ociContentConfigChanged(obj *sourcev1.OCIRepository) bool { if !ptr.Equal(obj.Spec.Ignore, obj.Status.ObservedIgnore) { return true } @@ -1200,7 +1351,7 @@ func ociContentConfigChanged(obj *ociv1.OCIRepository) bool { // Returns true if both arguments are nil or both arguments // dereference to the same value. 
// Based on k8s.io/utils/pointer/pointer.go pointer value equality. -func layerSelectorEqual(a, b *ociv1.OCILayerSelector) bool { +func layerSelectorEqual(a, b *sourcev1.OCILayerSelector) bool { if (a == nil) != (b == nil) { return false } @@ -1209,3 +1360,24 @@ func layerSelectorEqual(a, b *ociv1.OCILayerSelector) bool { } return *a == *b } + +func filterTags(filter string) filterFunc { + return func(tags []string) ([]string, error) { + if filter == "" { + return tags, nil + } + + match, err := regexp.Compile(filter) + if err != nil { + return nil, err + } + + validTags := []string{} + for _, tag := range tags { + if match.MatchString(tag) { + validTags = append(validTags, tag) + } + } + return validTags, nil + } +} diff --git a/internal/controller/ocirepository_controller_test.go b/internal/controller/ocirepository_controller_test.go index 86f034432..6ea35e962 100644 --- a/internal/controller/ocirepository_controller_test.go +++ b/internal/controller/ocirepository_controller_test.go @@ -19,6 +19,7 @@ package controller import ( "crypto/tls" "crypto/x509" + "encoding/json" "errors" "fmt" "net/http" @@ -35,7 +36,14 @@ import ( gcrv1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/mutate" "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/notaryproject/notation-core-go/signature/cose" + "github.com/notaryproject/notation-core-go/testhelper" + "github.com/notaryproject/notation-go" + "github.com/notaryproject/notation-go/registry" + "github.com/notaryproject/notation-go/signer" + "github.com/notaryproject/notation-go/verifier/trustpolicy" . 
"github.com/onsi/gomega" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" coptions "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" "github.com/sigstore/cosign/v2/cmd/cosign/cli/sign" "github.com/sigstore/cosign/v2/pkg/cosign" @@ -44,6 +52,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" + oras "oras.land/oras-go/v2/registry/remote" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -51,6 +60,9 @@ import ( kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" "github.com/fluxcd/pkg/git" "github.com/fluxcd/pkg/oci" "github.com/fluxcd/pkg/runtime/conditions" @@ -59,10 +71,10 @@ import ( "github.com/fluxcd/pkg/tar" sourcev1 "github.com/fluxcd/source-controller/api/v1" - ociv1 "github.com/fluxcd/source-controller/api/v1beta2" - intdigest "github.com/fluxcd/source-controller/internal/digest" serror "github.com/fluxcd/source-controller/internal/error" + snotation "github.com/fluxcd/source-controller/internal/oci/notation" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + testproxy "github.com/fluxcd/source-controller/tests/proxy" ) func TestOCIRepositoryReconciler_deleteBeforeFinalizer(t *testing.T) { @@ -77,10 +89,10 @@ func TestOCIRepositoryReconciler_deleteBeforeFinalizer(t *testing.T) { g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) }) - ocirepo := &ociv1.OCIRepository{} + ocirepo := &sourcev1.OCIRepository{} 
ocirepo.Name = "test-ocirepo" ocirepo.Namespace = namespaceName - ocirepo.Spec = ociv1.OCIRepositorySpec{ + ocirepo.Spec = sourcev1.OCIRepositorySpec{ Interval: metav1.Duration{Duration: interval}, URL: "oci://example.com", } @@ -132,7 +144,7 @@ func TestOCIRepository_Reconcile(t *testing.T) { tag: podinfoVersions["6.1.6"].tag, revision: fmt.Sprintf("%s@%s", podinfoVersions["6.1.6"].tag, podinfoVersions["6.1.6"].digest.String()), mediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - operation: ociv1.OCILayerCopy, + operation: sourcev1.OCILayerCopy, assertArtifact: []artifactFixture{ { expectedPath: "kustomize/deployment.yaml", @@ -170,15 +182,15 @@ func TestOCIRepository_Reconcile(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - origObj := &ociv1.OCIRepository{ + origObj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "ocirepository-reconcile", Namespace: ns.Name, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: tt.url, Interval: metav1.Duration{Duration: 60 * time.Minute}, - Reference: &ociv1.OCIRepositoryRef{}, + Reference: &sourcev1.OCIRepositoryRef{}, Insecure: true, }, } @@ -191,7 +203,7 @@ func TestOCIRepository_Reconcile(t *testing.T) { obj.Spec.Reference.SemVer = tt.semver } if tt.mediaType != "" { - obj.Spec.LayerSelector = &ociv1.OCILayerSelector{MediaType: tt.mediaType} + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{MediaType: tt.mediaType} if tt.operation != "" { obj.Spec.LayerSelector.Operation = tt.operation @@ -338,18 +350,18 @@ func TestOCIRepository_Reconcile_MediaType(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "ocirepository-reconcile", Namespace: ns.Name, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: 
sourcev1.OCIRepositorySpec{ URL: tt.url, Interval: metav1.Duration{Duration: 60 * time.Minute}, - Reference: &ociv1.OCIRepositoryRef{ + Reference: &sourcev1.OCIRepositoryRef{ Tag: tt.tag, }, - LayerSelector: &ociv1.OCILayerSelector{ + LayerSelector: &sourcev1.OCILayerSelector{ MediaType: tt.mediaType, }, Insecure: true, @@ -493,7 +505,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { crane.Insecure, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, ociv1.OCIPullFailedReason, "failed to determine artifact digest"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "failed to determine artifact digest"), }, }, { @@ -517,7 +529,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { includeSecret: true, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, ociv1.OCIPullFailedReason, "UNAUTHORIZED"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "UNAUTHORIZED"), }, }, { @@ -541,7 +553,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { includeSA: true, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, ociv1.OCIPullFailedReason, "UNAUTHORIZED"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "UNAUTHORIZED"), }, }, { @@ -565,8 +577,8 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { }, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), - *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + 
*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), }, }, { @@ -590,8 +602,8 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { }, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), - *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), }, }, { @@ -608,7 +620,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { }), }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, ociv1.OCIPullFailedReason, "failed to determine artifact digest"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "failed to determine artifact digest"), }, }, { @@ -633,7 +645,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { }, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, ociv1.AuthenticationFailedReason, "cannot append certificate into certificate pool: invalid CA certificate"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "%s", "failed to parse CA certificate"), }, }, { @@ -658,8 +670,8 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { }, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), - *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' 
for ''"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), }, }, { @@ -671,7 +683,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { crane.Insecure, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get credential from"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "%s", "failed to get credential from"), }, }, { @@ -695,8 +707,8 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { insecure: true, provider: "azure", assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), - *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), }, }, } @@ -707,14 +719,14 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). 
- WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "auth-strategy-", Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ Interval: metav1.Duration{Duration: interval}, Timeout: &metav1.Duration{Duration: timeout}, }, @@ -730,7 +742,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { img, err := createPodinfoImageFromTar("podinfo-6.1.6.tar", "6.1.6", server.registryHost, tt.craneOpts...) g.Expect(err).ToNot(HaveOccurred()) obj.Spec.URL = img.url - obj.Spec.Reference = &ociv1.OCIRepositoryRef{ + obj.Spec.Reference = &sourcev1.OCIRepositoryRef{ Tag: img.tag, } @@ -810,7 +822,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { sp := patch.NewSerialPatcher(obj, r.Client) tmpDir := t.TempDir() - got, err := r.reconcileSource(ctx, sp, obj, &sourcev1.Artifact{}, tmpDir) + got, err := r.reconcileSource(ctx, sp, obj, &meta.Artifact{}, tmpDir) if tt.wantErr { g.Expect(err).ToNot(BeNil()) } else { @@ -861,9 +873,9 @@ func TestOCIRepository_CertSecret(t *testing.T) { tlsSecretClientCert := corev1.Secret{ Data: map[string][]byte{ - oci.CACert: tlsCA, - oci.ClientCert: clientPublicKey, - oci.ClientKey: clientPrivateKey, + "caFile": tlsCA, + "certFile": clientPublicKey, + "keyFile": clientPrivateKey, }, } @@ -896,13 +908,13 @@ func TestOCIRepository_CertSecret(t *testing.T) { digest: pi.digest, certSecret: &corev1.Secret{ Data: map[string][]byte{ - oci.CACert: tlsCA, - oci.ClientCert: clientPublicKey, - oci.ClientKey: []byte("invalid-key"), + "caFile": tlsCA, + "certFile": clientPublicKey, + "keyFile": []byte("invalid-key"), }, }, expectreadyconition: false, - expectedstatusmessage: "failed to generate transport for '': tls: failed to find any PEM data in key input", + expectedstatusmessage: "failed to generate transport for '': failed to parse TLS 
certificate and key: tls: failed to find any PEM data in key input", }, } @@ -914,16 +926,16 @@ func TestOCIRepository_CertSecret(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "ocirepository-test-resource", Namespace: ns.Name, Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: tt.url, Interval: metav1.Duration{Duration: 60 * time.Minute}, - Reference: &ociv1.OCIRepositoryRef{Digest: tt.digest.String()}, + Reference: &sourcev1.OCIRepositoryRef{Digest: tt.digest.String()}, }, } @@ -943,7 +955,133 @@ func TestOCIRepository_CertSecret(t *testing.T) { key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - resultobj := ociv1.OCIRepository{} + resultobj := sourcev1.OCIRepository{} + + // Wait for the finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, &resultobj); err != nil { + return false + } + return len(resultobj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for the object to be ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, &resultobj); err != nil { + return false + } + readyCondition := conditions.Get(&resultobj, meta.ReadyCondition) + if readyCondition == nil || conditions.IsUnknown(&resultobj, meta.ReadyCondition) { + return false + } + return obj.Generation == readyCondition.ObservedGeneration && + conditions.IsReady(&resultobj) == tt.expectreadyconition + }, timeout).Should(BeTrue()) + + tt.expectedstatusmessage = strings.ReplaceAll(tt.expectedstatusmessage, "", pi.url) + + readyCondition := conditions.Get(&resultobj, meta.ReadyCondition) + g.Expect(readyCondition.Message).Should(ContainSubstring(tt.expectedstatusmessage)) + + // Wait for the object to be deleted + g.Expect(testEnv.Delete(ctx, &resultobj)).To(Succeed()) + g.Eventually(func() bool { + if err := 
testEnv.Get(ctx, key, &resultobj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) + }) + } +} + +func TestOCIRepository_ProxySecret(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + regServer, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { + regServer.Close() + }) + + pi, err := createPodinfoImageFromTar("podinfo-6.1.5.tar", "6.1.5", regServer.registryHost) + g.Expect(err).NotTo(HaveOccurred()) + + proxyAddr, proxyPort := testproxy.New(t) + + tests := []struct { + name string + url string + digest gcrv1.Hash + proxySecret *corev1.Secret + expectreadyconition bool + expectedstatusmessage string + }{ + { + name: "test proxied connection", + url: pi.url, + digest: pi.digest, + proxySecret: &corev1.Secret{ + Data: map[string][]byte{ + "address": []byte(fmt.Sprintf("http://%s", proxyAddr)), + }, + }, + expectreadyconition: true, + expectedstatusmessage: fmt.Sprintf("stored artifact for digest '%s'", pi.digest.String()), + }, + { + name: "test proxy connection error", + url: pi.url, + digest: pi.digest, + proxySecret: &corev1.Secret{ + Data: map[string][]byte{ + "address": []byte(fmt.Sprintf("http://localhost:%d", proxyPort+1)), + }, + }, + expectreadyconition: false, + expectedstatusmessage: "failed to pull artifact", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "ocirepository-test") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "ocirepository-test-resource", + Namespace: ns.Name, + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: tt.url, + Interval: metav1.Duration{Duration: 60 * time.Minute}, + Reference: &sourcev1.OCIRepositoryRef{Digest: tt.digest.String()}, + }, + } + + if 
tt.proxySecret != nil { + tt.proxySecret.ObjectMeta = metav1.ObjectMeta{ + GenerateName: "proxy-secretref", + Namespace: ns.Name, + } + + g.Expect(testEnv.CreateAndWait(ctx, tt.proxySecret)).To(Succeed()) + defer func() { g.Expect(testEnv.Delete(ctx, tt.proxySecret)).To(Succeed()) }() + + obj.Spec.ProxySecretRef = &meta.LocalObjectReference{Name: tt.proxySecret.Name} + } + + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + resultobj := sourcev1.OCIRepository{} // Wait for the finalizer to be set g.Eventually(func() bool { @@ -953,7 +1091,7 @@ func TestOCIRepository_CertSecret(t *testing.T) { return len(resultobj.Finalizers) > 0 }, timeout).Should(BeTrue()) - // Wait for the object to fail + // Wait for the object to be ready g.Eventually(func() bool { if err := testEnv.Get(ctx, key, &resultobj); err != nil { return false @@ -968,155 +1106,707 @@ func TestOCIRepository_CertSecret(t *testing.T) { tt.expectedstatusmessage = strings.ReplaceAll(tt.expectedstatusmessage, "", pi.url) - readyCondition := conditions.Get(&resultobj, meta.ReadyCondition) - g.Expect(readyCondition.Message).Should(ContainSubstring(tt.expectedstatusmessage)) + readyCondition := conditions.Get(&resultobj, meta.ReadyCondition) + g.Expect(readyCondition.Message).Should(ContainSubstring(tt.expectedstatusmessage)) + + // Wait for the object to be deleted + g.Expect(testEnv.Delete(ctx, &resultobj)).To(Succeed()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, &resultobj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) + }) + } +} + +func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { + g := NewWithT(t) + + tmpDir := t.TempDir() + server, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + podinfoVersions, err := 
pushMultiplePodinfoImages(server.registryHost, true, "6.1.4", "6.1.5", "6.1.6") + g.Expect(err).ToNot(HaveOccurred()) + + img6 := podinfoVersions["6.1.6"] + img5 := podinfoVersions["6.1.5"] + + tests := []struct { + name string + reference *sourcev1.OCIRepositoryRef + want sreconcile.Result + wantErr bool + wantRevision string + assertConditions []metav1.Condition + }{ + { + name: "no reference (latest tag)", + want: sreconcile.ResultSuccess, + wantRevision: fmt.Sprintf("latest@%s", img6.digest.String()), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + }, + }, + { + name: "tag reference", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.6", + }, + want: sreconcile.ResultSuccess, + wantRevision: fmt.Sprintf("%s@%s", img6.tag, img6.digest.String()), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + }, + }, + { + name: "semver reference", + reference: &sourcev1.OCIRepositoryRef{ + SemVer: ">= 6.1.5", + }, + want: sreconcile.ResultSuccess, + wantRevision: fmt.Sprintf("%s@%s", img6.tag, img6.digest.String()), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + }, + }, + { + name: "digest reference", + reference: &sourcev1.OCIRepositoryRef{ + Digest: img6.digest.String(), + }, + wantRevision: img6.digest.String(), + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + }, + }, + { + name: "invalid tag reference", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.0", + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, " MANIFEST_UNKNOWN"), + }, + }, + { + name: "invalid semver reference", + reference: &sourcev1.OCIRepositoryRef{ + SemVer: "<= 6.1.0", + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "failed to determine the artifact tag for 'oci://%s/podinfo': no match found for semver: <= 6.1.0", server.registryHost), + }, + }, + { + name: "invalid digest reference", + reference: &sourcev1.OCIRepositoryRef{ + Digest: "invalid", + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to determine artifact digest"), + }, + }, + { + name: "semver should take precedence over tag", + reference: &sourcev1.OCIRepositoryRef{ + SemVer: ">= 6.1.5", + Tag: "6.1.5", + }, + want: sreconcile.ResultSuccess, + wantRevision: fmt.Sprintf("%s@%s", img6.tag, img6.digest.String()), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + }, + }, + { + name: "digest should take precedence over semver", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.6", + SemVer: ">= 6.1.6", + Digest: img5.digest.String(), + }, + 
want: sreconcile.ResultSuccess, + wantRevision: img5.digest.String(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + }, + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). + WithStatusSubresource(&sourcev1.OCIRepository{}) + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "checkout-strategy-", + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + Insecure: true, + }, + } + + if tt.reference != nil { + obj.Spec.Reference = tt.reference + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) + + artifact := &meta.Artifact{} + tmpDir := t.TempDir() + got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(artifact.Revision).To(Equal(tt.wantRevision)) + } + + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testing.T) { + g := NewWithT(t) + + tests := []struct { + name string + reference *sourcev1.OCIRepositoryRef + insecure 
bool + want sreconcile.Result + wantErr bool + wantErrMsg string + shouldSign bool + useDigest bool + addMultipleCerts bool + provideNoCert bool + beforeFunc func(obj *sourcev1.OCIRepository, tag, revision string) + assertConditions []metav1.Condition + }{ + { + name: "signed image should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", + }, + shouldSign: true, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "unsigned image should not pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.5", + }, + wantErr: true, + useDigest: true, + wantErrMsg: "failed to verify the signature using provider 'notation': no signature is associated with \"\"", + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider '': no signature is associated with \"\", make sure the artifact was signed successfully"), + }, + }, + { + name: "verify failed before, removed from spec, remove condition", + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") + obj.Spec.Verify = nil + 
obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + }, + want: sreconcile.ResultSuccess, + }, + { + name: "same artifact, verified before, change in obj gen verify again", + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + shouldSign: true, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + // Set Verified with old observed generation and different reason/message. + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified") + // Set new object generation. + obj.SetGeneration(3) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "no verify for already verified, verified condition remains the same", + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + shouldSign: true, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + // Artifact present and custom verified condition reason/message. 
+ obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, "Verified", "verified"), + }, + }, + { + name: "signed image on an insecure registry passes verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.6", + }, + shouldSign: true, + insecure: true, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "signed image on an insecure registry using digest as reference passes verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.6", + }, + shouldSign: true, + insecure: true, + useDigest: true, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "verification level audit and correct trust identity should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.6", + }, + shouldSign: true, + insecure: true, + useDigest: true, + want: sreconcile.ResultSuccess, + addMultipleCerts: true, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), + }, + }, + { + name: "no cert provided should not pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.5", + }, + wantErr: true, + useDigest: true, + provideNoCert: true, + // no namespace but the namespace name should appear before the /notation-config + wantErrMsg: "failed to verify the signature using provider 'notation': no certificates found in secret '/notation-config", + want: sreconcile.ResultEmpty, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider '': no certificates found in secret '/notation-config"), + }, + }, + } + + clientBuilder := fakeclient.NewClientBuilder(). + WithScheme(testEnv.GetScheme()). 
+ WithStatusSubresource(&sourcev1.OCIRepository{}) + + r := &OCIRepositoryReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), + } + + certTuple := testhelper.GetRSASelfSignedSigningCertTuple("notation self-signed certs for testing") + certs := []*x509.Certificate{certTuple.Cert} + + signer, err := signer.New(certTuple.PrivateKey, certs) + g.Expect(err).ToNot(HaveOccurred()) + + policyDocument := trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelStrict.Name, Override: map[trustpolicy.ValidationType]trustpolicy.ValidationAction{trustpolicy.TypeRevocation: trustpolicy.ActionSkip}}, + TrustStores: []string{"ca:valid-trust-store"}, + TrustedIdentities: []string{"*"}, + }, + }, + } + + tmpDir := t.TempDir() + + policy, err := json.Marshal(policyDocument) + g.Expect(err).NotTo(HaveOccurred()) + + caSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-trust-store", + Generation: 1, + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + }, + } + + g.Expect(r.Create(ctx, caSecret)).ToNot(HaveOccurred()) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + workspaceDir := t.TempDir() + server, err := setupRegistryServer(ctx, workspaceDir, registryOptions{ + withTLS: !tt.insecure, + }) + g.Expect(err).NotTo(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "verify-oci-source-signature-", + Generation: 1, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), + Verify: &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + }, + Interval: 
metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + data := map[string][]byte{} + + if tt.addMultipleCerts { + data["a.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("a not used for signing").Cert.Raw + data["b.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("b not used for signing").Cert.Raw + data["c.crt"] = testhelper.GetRSASelfSignedSigningCertTuple("c not used for signing").Cert.Raw + } + + if !tt.provideNoCert { + data["notation.crt"] = certTuple.Cert.Raw + } + + data["trustpolicy.json"] = policy + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "notation-config-", + }, + Data: data, + } + + g.Expect(r.Create(ctx, secret)).NotTo(HaveOccurred()) + + if tt.insecure { + obj.Spec.Insecure = true + } else { + obj.Spec.CertSecretRef = &meta.LocalObjectReference{ + Name: "valid-trust-store", + } + } + + obj.Spec.Verify.SecretRef = &meta.LocalObjectReference{Name: secret.GetName()} + + if tt.reference != nil { + obj.Spec.Reference = tt.reference + } + + podinfoVersions, err := pushMultiplePodinfoImages(server.registryHost, tt.insecure, tt.reference.Tag) + g.Expect(err).ToNot(HaveOccurred()) + + if tt.useDigest { + obj.Spec.Reference.Digest = podinfoVersions[tt.reference.Tag].digest.String() + } + + keychain, err := r.keychain(ctx, obj) + if err != nil { + g.Expect(err).ToNot(HaveOccurred()) + } + + opts := makeRemoteOptions(ctx, makeTransport(true), keychain, nil) + + artifactRef, err := r.getArtifactRef(obj, opts) + g.Expect(err).ToNot(HaveOccurred()) + + if tt.shouldSign { + remoteRepo, err := oras.NewRepository(artifactRef.String()) + g.Expect(err).ToNot(HaveOccurred()) + + if tt.insecure { + remoteRepo.PlainHTTP = true + } + + repo := registry.NewRepository(remoteRepo) + + signatureMediaType := cose.MediaTypeEnvelope + + signOptions := notation.SignOptions{ + SignerSignOptions: notation.SignerSignOptions{ + SignatureMediaType: signatureMediaType, + }, + ArtifactReference: 
artifactRef.String(), + } + + _, err = notation.Sign(ctx, signer, repo, signOptions) + g.Expect(err).ToNot(HaveOccurred()) + } + + image := podinfoVersions[tt.reference.Tag] + assertConditions := tt.assertConditions + for k := range assertConditions { + if tt.useDigest { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", image.digest.String()) + } else { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", fmt.Sprintf("%s@%s", tt.reference.Tag, image.digest.String())) + } + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", artifactRef.String()) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "notation") + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj, image.tag, image.digest.String()) + } + + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) + defer func() { + g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) + g.Expect(r.Delete(ctx, secret)).NotTo(HaveOccurred()) + }() + + sp := patch.NewSerialPatcher(obj, r.Client) - // Wait for the object to be deleted - g.Expect(testEnv.Delete(ctx, &resultobj)).To(Succeed()) - g.Eventually(func() bool { - if err := testEnv.Get(ctx, key, &resultobj); err != nil { - return apierrors.IsNotFound(err) - } - return false - }, timeout).Should(BeTrue()) + artifact := &meta.Artifact{} + got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) + if tt.wantErr { + tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", artifactRef.String()) + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) }) } } -func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { +func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *testing.T) { g := NewWithT(t) 
- tmpDir := t.TempDir() - server, err := setupRegistryServer(ctx, tmpDir, registryOptions{}) - g.Expect(err).ToNot(HaveOccurred()) - t.Cleanup(func() { - server.Close() - }) - - podinfoVersions, err := pushMultiplePodinfoImages(server.registryHost, true, "6.1.4", "6.1.5", "6.1.6") - g.Expect(err).ToNot(HaveOccurred()) - - img6 := podinfoVersions["6.1.6"] - img5 := podinfoVersions["6.1.5"] - tests := []struct { - name string - reference *ociv1.OCIRepositoryRef - want sreconcile.Result - wantErr bool - wantRevision string - assertConditions []metav1.Condition + name string + reference *sourcev1.OCIRepositoryRef + signatureVerification trustpolicy.SignatureVerification + trustedIdentities []string + trustStores []string + want sreconcile.Result + wantErr bool + wantErrMsg string + useDigest bool + usePolicyJson bool + provideNoPolicy bool + policyJson string + beforeFunc func(obj *sourcev1.OCIRepository, tag, revision string) + assertConditions []metav1.Condition }{ { - name: "no reference (latest tag)", - want: sreconcile.ResultSuccess, - wantRevision: fmt.Sprintf("latest@%s", img6.digest.String()), + name: "verification level audit and incorrect trust identity should fail verification but not error", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", + }, + signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelAudit.Name}, + trustedIdentities: []string{"x509.subject: C=US, ST=WA, L=Seattle, O=Notary, CN=example.com"}, + trustStores: []string{"ca:valid-trust-store"}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), - *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + 
*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), }, }, { - name: "tag reference", - reference: &ociv1.OCIRepositoryRef{ - Tag: "6.1.6", + name: "verification level permissive and incorrect trust identity should fail verification and error", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", }, - want: sreconcile.ResultSuccess, - wantRevision: fmt.Sprintf("%s@%s", img6.tag, img6.digest.String()), + signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelPermissive.Name}, + trustedIdentities: []string{"x509.subject: C=US, ST=WA, L=Seattle, O=Notary, CN=example.com"}, + trustStores: []string{"ca:valid-trust-store"}, + useDigest: true, + want: sreconcile.ResultEmpty, + wantErr: true, + wantErrMsg: "failed to verify the signature using provider 'notation': signature verification failed\nfailed to verify signature with digest , signing certificate from the digital signature does not match the X.509 trusted identities [map[\"C\":\"US\" \"CN\":\"example.com\" \"L\":\"Seattle\" \"O\":\"Notary\" \"ST\":\"WA\"]] defined in the trust policy \"test-statement-name\"", assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), - *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider 'notation': signature verification failed\nfailed to verify signature with digest , signing certificate from the digital signature does not match the X.509 trusted 
identities [map[\"C\":\"US\" \"CN\":\"example.com\" \"L\":\"Seattle\" \"O\":\"Notary\" \"ST\":\"WA\"]] defined in the trust policy \"test-statement-name\""), }, }, { - name: "semver reference", - reference: &ociv1.OCIRepositoryRef{ - SemVer: ">= 6.1.5", + name: "verification level permissive and correct trust identity should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", }, - want: sreconcile.ResultSuccess, - wantRevision: fmt.Sprintf("%s@%s", img6.tag, img6.digest.String()), + signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelPermissive.Name}, + trustedIdentities: []string{"*"}, + trustStores: []string{"ca:valid-trust-store"}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), - *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), }, }, { - name: "digest reference", - reference: &ociv1.OCIRepositoryRef{ - Digest: img6.digest.String(), + name: "verification level audit and correct trust identity should pass verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", }, - wantRevision: img6.digest.String(), - want: sreconcile.ResultSuccess, + signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelAudit.Name}, + trustedIdentities: []string{"*"}, + trustStores: []string{"ca:valid-trust-store"}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - 
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), - *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of revision "), }, }, { - name: "invalid tag reference", - reference: &ociv1.OCIRepositoryRef{ - Tag: "6.1.0", + name: "verification level skip and should not be marked as verified", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", }, - want: sreconcile.ResultEmpty, - wantErr: true, + signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelSkip.Name}, + trustedIdentities: []string{}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, ociv1.OCIPullFailedReason, " MANIFEST_UNKNOWN"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), }, }, { - name: "invalid semver reference", - reference: &ociv1.OCIRepositoryRef{ - SemVer: "<= 6.1.0", + name: "valid json but empty policy json should fail verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", }, - want: sreconcile.ResultEmpty, - wantErr: true, + usePolicyJson: true, + policyJson: "{}", + wantErr: true, + wantErrMsg: "trust policy document has empty version", + want: sreconcile.ResultEmpty, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, 
"failed to determine the artifact tag for 'oci://%s/podinfo': no match found for semver: <= 6.1.0", server.registryHost), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "trust policy document has empty version, version must be specified"), }, }, { - name: "invalid digest reference", - reference: &ociv1.OCIRepositoryRef{ - Digest: "invalid", + name: "empty string should fail verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", }, - want: sreconcile.ResultEmpty, - wantErr: true, + usePolicyJson: true, + policyJson: "", + wantErr: true, + wantErrMsg: fmt.Sprintf("error occurred while parsing %s: unexpected end of JSON input", snotation.DefaultTrustPolicyKey), + want: sreconcile.ResultEmpty, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, ociv1.OCIPullFailedReason, "failed to determine artifact digest"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "error occurred while parsing %s: unexpected end of JSON input", snotation.DefaultTrustPolicyKey), }, }, { - name: "semver should take precedence over tag", - reference: &ociv1.OCIRepositoryRef{ - SemVer: ">= 6.1.5", - Tag: "6.1.5", + name: "invalid character in string should fail verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", }, - want: sreconcile.ResultSuccess, - wantRevision: fmt.Sprintf("%s@%s", img6.tag, img6.digest.String()), + usePolicyJson: true, + 
policyJson: "{\"version\": \"1.0\u000A\", \"trust_policies\": []}", + wantErr: true, + wantErrMsg: fmt.Sprintf("error occurred while parsing %s: invalid character '\\n' in string literal", snotation.DefaultTrustPolicyKey), + want: sreconcile.ResultEmpty, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), - *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "error occurred while parsing %s: invalid character '\\n' in string literal", snotation.DefaultTrustPolicyKey), }, }, { - name: "digest should take precedence over semver", - reference: &ociv1.OCIRepositoryRef{ - Tag: "6.1.6", - SemVer: ">= 6.1.6", - Digest: img5.digest.String(), + name: "empty string should fail verification", + reference: &sourcev1.OCIRepositoryRef{ + Tag: "6.1.4", }, - want: sreconcile.ResultSuccess, - wantRevision: img5.digest.String(), + provideNoPolicy: true, + wantErr: true, + wantErrMsg: fmt.Sprintf("failed to verify the signature using provider 'notation': '%s' not found in secret '/notation", snotation.DefaultTrustPolicyKey), + want: sreconcile.ResultEmpty, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision"), - *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, 
meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider 'notation': '%s' not found in secret '/notation", snotation.DefaultTrustPolicyKey), }, }, } clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -1125,25 +1815,153 @@ func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), } + certTuple := testhelper.GetRSASelfSignedSigningCertTuple("notation self-signed certs for testing") + certs := []*x509.Certificate{certTuple.Cert} + + signer, err := signer.New(certTuple.PrivateKey, certs) + g.Expect(err).ToNot(HaveOccurred()) + + tmpDir := t.TempDir() + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - obj := &ociv1.OCIRepository{ + g := NewWithT(t) + + workspaceDir := t.TempDir() + server, err := setupRegistryServer(ctx, workspaceDir, registryOptions{}) + g.Expect(err).NotTo(HaveOccurred()) + t.Cleanup(func() { + server.Close() + }) + + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ - GenerateName: "checkout-strategy-", + GenerateName: "verify-oci-source-signature-", Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ - URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), + Spec: sourcev1.OCIRepositorySpec{ + URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), + Verify: &sourcev1.OCIRepositoryVerification{ + Provider: "notation", + }, Interval: metav1.Duration{Duration: interval}, Timeout: &metav1.Duration{Duration: timeout}, - Insecure: true, }, } + var policy []byte + + if !tt.usePolicyJson { + policyDocument := trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + 
Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: tt.signatureVerification, + TrustStores: tt.trustStores, + TrustedIdentities: tt.trustedIdentities, + }, + }, + } + + policy, err = json.Marshal(policyDocument) + g.Expect(err).NotTo(HaveOccurred()) + } else { + policy = []byte(tt.policyJson) + } + + data := map[string][]byte{} + + if !tt.provideNoPolicy { + data["trustpolicy.json"] = policy + } + + data["notation.crt"] = certTuple.Cert.Raw + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "notation-", + }, + Data: data, + } + + g.Expect(r.Create(ctx, secret)).NotTo(HaveOccurred()) + + obj.Spec.Insecure = true + + obj.Spec.Verify.SecretRef = &meta.LocalObjectReference{Name: secret.GetName()} + if tt.reference != nil { obj.Spec.Reference = tt.reference } + podinfoVersions, err := pushMultiplePodinfoImages(server.registryHost, true, tt.reference.Tag) + g.Expect(err).ToNot(HaveOccurred()) + + if tt.useDigest { + obj.Spec.Reference.Digest = podinfoVersions[tt.reference.Tag].digest.String() + } + + keychain, err := r.keychain(ctx, obj) + if err != nil { + g.Expect(err).ToNot(HaveOccurred()) + } + + opts := makeRemoteOptions(ctx, makeTransport(true), keychain, nil) + + artifactRef, err := r.getArtifactRef(obj, opts) + g.Expect(err).ToNot(HaveOccurred()) + + remoteRepo, err := oras.NewRepository(artifactRef.String()) + g.Expect(err).ToNot(HaveOccurred()) + + remoteRepo.PlainHTTP = true + + repo := registry.NewRepository(remoteRepo) + + signatureMediaType := cose.MediaTypeEnvelope + + signOptions := notation.SignOptions{ + SignerSignOptions: notation.SignerSignOptions{ + SignatureMediaType: signatureMediaType, + }, + ArtifactReference: artifactRef.String(), + } + + _, err = notation.Sign(ctx, signer, repo, signOptions) + g.Expect(err).ToNot(HaveOccurred()) + + image := podinfoVersions[tt.reference.Tag] + signatureDigest := "" + + artifactDescriptor, err := repo.Resolve(ctx, image.tag) + 
g.Expect(err).ToNot(HaveOccurred()) + _ = repo.ListSignatures(ctx, artifactDescriptor, func(signatureManifests []ocispec.Descriptor) error { + g.Expect(len(signatureManifests)).Should(Equal(1)) + signatureDigest = signatureManifests[0].Digest.String() + return nil + }) + + assertConditions := tt.assertConditions + for k := range assertConditions { + if tt.useDigest { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", image.digest.String()) + } else { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", fmt.Sprintf("%s@%s", tt.reference.Tag, image.digest.String())) + } + + if signatureDigest != "" { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", signatureDigest) + } + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", artifactRef.String()) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", "notation") + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj, image.tag, image.digest.String()) + } + g.Expect(r.Client.Create(ctx, obj)).ToNot(HaveOccurred()) defer func() { g.Expect(r.Client.Delete(ctx, obj)).ToNot(HaveOccurred()) @@ -1151,40 +1969,43 @@ func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { sp := patch.NewSerialPatcher(obj, r.Client) - artifact := &sourcev1.Artifact{} - tmpDir := t.TempDir() + artifact := &meta.Artifact{} got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) + g.Expect(r.Delete(ctx, secret)).NotTo(HaveOccurred()) if tt.wantErr { - g.Expect(err).To(HaveOccurred()) + tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", artifactRef.String()) + if signatureDigest != "" { + tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", signatureDigest) + } + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErrMsg)) } else { g.Expect(err).ToNot(HaveOccurred()) - g.Expect(artifact.Revision).To(Equal(tt.wantRevision)) } - 
g.Expect(got).To(Equal(tt.want)) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) }) } } -func TestOCIRepository_reconcileSource_verifyOCISourceSignature(t *testing.T) { +func TestOCIRepository_reconcileSource_verifyOCISourceSignatureCosign(t *testing.T) { g := NewWithT(t) tests := []struct { name string - reference *ociv1.OCIRepositoryRef + reference *sourcev1.OCIRepositoryRef insecure bool want sreconcile.Result wantErr bool wantErrMsg string shouldSign bool keyless bool - beforeFunc func(obj *ociv1.OCIRepository, tag, revision string) + beforeFunc func(obj *sourcev1.OCIRepository, tag, revision string) assertConditions []metav1.Condition }{ { name: "signed image should pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.4", }, shouldSign: true, @@ -1197,7 +2018,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature(t *testing.T) { }, { name: "unsigned image should not pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.5", }, wantErr: true, @@ -1211,7 +2032,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature(t *testing.T) { }, { name: "unsigned image should not pass keyless verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.5", }, wantErr: true, @@ -1220,25 +2041,25 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature(t *testing.T) { assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), - *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider ' keyless': no matching signatures"), + 
*conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider ' keyless': no signatures found"), }, }, { name: "verify failed before, removed from spec, remove condition", - reference: &ociv1.OCIRepositoryRef{Tag: "6.1.4"}, - beforeFunc: func(obj *ociv1.OCIRepository, tag, revision string) { + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") obj.Spec.Verify = nil - obj.Status.Artifact = &sourcev1.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} }, want: sreconcile.ResultSuccess, }, { name: "same artifact, verified before, change in obj gen verify again", - reference: &ociv1.OCIRepositoryRef{Tag: "6.1.4"}, + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, shouldSign: true, - beforeFunc: func(obj *ociv1.OCIRepository, tag, revision string) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} // Set Verified with old observed generation and different reason/message. conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified") // Set new object generation. 
@@ -1251,11 +2072,11 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature(t *testing.T) { }, { name: "no verify for already verified, verified condition remains the same", - reference: &ociv1.OCIRepositoryRef{Tag: "6.1.4"}, + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, shouldSign: true, - beforeFunc: func(obj *ociv1.OCIRepository, tag, revision string) { + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { // Artifact present and custom verified condition reason/message. - obj.Status.Artifact = &sourcev1.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified") }, want: sreconcile.ResultSuccess, @@ -1265,7 +2086,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature(t *testing.T) { }, { name: "signed image on an insecure registry passes verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.6", }, shouldSign: true, @@ -1281,7 +2102,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). 
- WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -1337,14 +2158,14 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature(t *testing.T) { server.Close() }) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "verify-oci-source-signature-", Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), - Verify: &ociv1.OCIRepositoryVerification{ + Verify: &sourcev1.OCIRepositoryVerification{ Provider: "cosign", }, Interval: metav1.Duration{Duration: interval}, @@ -1420,7 +2241,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature(t *testing.T) { sp := patch.NewSerialPatcher(obj, r.Client) - artifact := &sourcev1.Artifact{} + artifact := &meta.Artifact{} got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) if tt.wantErr { tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", artifactRef.String()) @@ -1438,17 +2259,17 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature(t *testing.T) { func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testing.T) { tests := []struct { name string - reference *ociv1.OCIRepositoryRef + reference *sourcev1.OCIRepositoryRef want sreconcile.Result wantErr bool wantErrMsg string - beforeFunc func(obj *ociv1.OCIRepository) + beforeFunc func(obj *sourcev1.OCIRepository) assertConditions []metav1.Condition revision string }{ { name: "signed image with no identity matching specified should pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.5.1", }, want: sreconcile.ResultSuccess, @@ -1461,12 +2282,12 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi }, { name: "signed image with correct subject and issuer should pass verification", 
- reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.5.1", }, want: sreconcile.ResultSuccess, - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Spec.Verify.MatchOIDCIdentity = []ociv1.OIDCIdentityMatch{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ { Subject: "^https://github.com/stefanprodan/podinfo.*$", @@ -1483,12 +2304,12 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi }, { name: "signed image with both incorrect and correct identity matchers should pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.5.1", }, want: sreconcile.ResultSuccess, - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Spec.Verify.MatchOIDCIdentity = []ociv1.OIDCIdentityMatch{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ { Subject: "intruder", Issuer: "^https://honeypot.com$", @@ -1509,13 +2330,13 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi }, { name: "signed image with incorrect subject and issuer should not pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.5.1", }, wantErr: true, want: sreconcile.ResultEmpty, - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Spec.Verify.MatchOIDCIdentity = []ociv1.OIDCIdentityMatch{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ { Subject: "intruder", Issuer: "^https://honeypot.com$", @@ -1531,7 +2352,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi }, { name: "unsigned image should not pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.0", }, wantErr: true, @@ -1539,7 +2360,7 @@ func 
TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), - *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider ' keyless': no matching signatures"), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider ' keyless': no signatures found"), }, revision: "6.1.0@sha256:3816fe9636a297f0c934b1fa0f46fe4c068920375536ac2803604adfb4c55894", }, @@ -1547,7 +2368,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -1560,14 +2381,14 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "verify-oci-source-signature-", Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: "oci://ghcr.io/stefanprodan/manifests/podinfo", - Verify: &ociv1.OCIRepositoryVerification{ + Verify: &sourcev1.OCIRepositoryVerification{ Provider: "cosign", }, Interval: metav1.Duration{Duration: interval}, @@ -1595,7 +2416,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi sp := patch.NewSerialPatcher(obj, r.Client) - artifact := &sourcev1.Artifact{} + artifact := &meta.Artifact{} got, err := r.reconcileSource(ctx, 
sp, obj, artifact, t.TempDir()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) @@ -1630,101 +2451,101 @@ func TestOCIRepository_reconcileSource_noop(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *ociv1.OCIRepository) - afterFunc func(g *WithT, artifact *sourcev1.Artifact) + beforeFunc func(obj *sourcev1.OCIRepository) + afterFunc func(g *WithT, artifact *meta.Artifact) }{ { name: "full reconcile - no existing artifact", - afterFunc: func(g *WithT, artifact *sourcev1.Artifact) { + afterFunc: func(g *WithT, artifact *meta.Artifact) { g.Expect(artifact.Metadata).ToNot(BeEmpty()) }, }, { name: "noop - artifact revisions match", - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{ Revision: testRevision, } }, - afterFunc: func(g *WithT, artifact *sourcev1.Artifact) { + afterFunc: func(g *WithT, artifact *meta.Artifact) { g.Expect(artifact.Metadata).To(BeEmpty()) }, }, { name: "full reconcile - same rev, unobserved ignore", - beforeFunc: func(obj *ociv1.OCIRepository) { + beforeFunc: func(obj *sourcev1.OCIRepository) { obj.Status.ObservedIgnore = ptr.To("aaa") - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: testRevision, } }, - afterFunc: func(g *WithT, artifact *sourcev1.Artifact) { + afterFunc: func(g *WithT, artifact *meta.Artifact) { g.Expect(artifact.Metadata).ToNot(BeEmpty()) }, }, { name: "noop - same rev, observed ignore", - beforeFunc: func(obj *ociv1.OCIRepository) { + beforeFunc: func(obj *sourcev1.OCIRepository) { obj.Spec.Ignore = ptr.To("aaa") obj.Status.ObservedIgnore = ptr.To("aaa") - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: testRevision, } }, - afterFunc: func(g *WithT, artifact *sourcev1.Artifact) { + afterFunc: func(g *WithT, artifact *meta.Artifact) { g.Expect(artifact.Metadata).To(BeEmpty()) }, }, { 
name: "full reconcile - same rev, unobserved layer selector", - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Spec.LayerSelector = &ociv1.OCILayerSelector{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Operation: ociv1.OCILayerCopy, + Operation: sourcev1.OCILayerCopy, } - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: testRevision, } }, - afterFunc: func(g *WithT, artifact *sourcev1.Artifact) { + afterFunc: func(g *WithT, artifact *meta.Artifact) { g.Expect(artifact.Metadata).ToNot(BeEmpty()) }, }, { name: "noop - same rev, observed layer selector", - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Spec.LayerSelector = &ociv1.OCILayerSelector{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Operation: ociv1.OCILayerCopy, + Operation: sourcev1.OCILayerCopy, } - obj.Status.ObservedLayerSelector = &ociv1.OCILayerSelector{ + obj.Status.ObservedLayerSelector = &sourcev1.OCILayerSelector{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Operation: ociv1.OCILayerCopy, + Operation: sourcev1.OCILayerCopy, } - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: testRevision, } }, - afterFunc: func(g *WithT, artifact *sourcev1.Artifact) { + afterFunc: func(g *WithT, artifact *meta.Artifact) { g.Expect(artifact.Metadata).To(BeEmpty()) }, }, { name: "full reconcile - same rev, observed layer selector changed", - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Spec.LayerSelector = &ociv1.OCILayerSelector{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Operation: ociv1.OCILayerExtract, + Operation: 
sourcev1.OCILayerExtract, } - obj.Status.ObservedLayerSelector = &ociv1.OCILayerSelector{ + obj.Status.ObservedLayerSelector = &sourcev1.OCILayerSelector{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Operation: ociv1.OCILayerCopy, + Operation: sourcev1.OCILayerCopy, } - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: testRevision, } }, - afterFunc: func(g *WithT, artifact *sourcev1.Artifact) { + afterFunc: func(g *WithT, artifact *meta.Artifact) { g.Expect(artifact.Metadata).ToNot(BeEmpty()) }, }, @@ -1732,7 +2553,7 @@ func TestOCIRepository_reconcileSource_noop(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -1745,14 +2566,14 @@ func TestOCIRepository_reconcileSource_noop(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "noop-", Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), - Reference: &ociv1.OCIRepositoryRef{Tag: "6.1.5"}, + Reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.5"}, Interval: metav1.Duration{Duration: interval}, Timeout: &metav1.Duration{Duration: timeout}, Insecure: true, @@ -1770,7 +2591,7 @@ func TestOCIRepository_reconcileSource_noop(t *testing.T) { sp := patch.NewSerialPatcher(obj, r.Client) - artifact := &sourcev1.Artifact{} + artifact := &meta.Artifact{} tmpDir := t.TempDir() got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) g.Expect(err).ToNot(HaveOccurred()) @@ -1787,29 +2608,29 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { tests := []struct { name string targetPath string - artifact *sourcev1.Artifact - beforeFunc 
func(obj *ociv1.OCIRepository) + artifact *meta.Artifact + beforeFunc func(obj *sourcev1.OCIRepository) want sreconcile.Result wantErr bool - assertArtifact *sourcev1.Artifact + assertArtifact *meta.Artifact assertPaths []string assertConditions []metav1.Condition - afterFunc func(g *WithT, obj *ociv1.OCIRepository) + afterFunc func(g *WithT, obj *sourcev1.OCIRepository) }{ { name: "Archiving Artifact creates correct files and condition", targetPath: "testdata/oci/repository", - artifact: &sourcev1.Artifact{ + artifact: &meta.Artifact{ Revision: "revision", }, - beforeFunc: func(obj *ociv1.OCIRepository) { + beforeFunc: func(obj *sourcev1.OCIRepository) { conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new revision") }, want: sreconcile.ResultSuccess, assertPaths: []string{ "latest.tar.gz", }, - afterFunc: func(g *WithT, obj *ociv1.OCIRepository) { + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { g.Expect(obj.Status.Artifact.Digest).To(Equal("sha256:6a5bd135a816ec0ad246c41cfdd87629e40ef6520001aeb2d0118a703abe9e7a")) }, assertConditions: []metav1.Condition{ @@ -1819,15 +2640,15 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { { name: "Artifact with source ignore", targetPath: "testdata/oci/repository", - artifact: &sourcev1.Artifact{Revision: "revision"}, - beforeFunc: func(obj *ociv1.OCIRepository) { + artifact: &meta.Artifact{Revision: "revision"}, + beforeFunc: func(obj *sourcev1.OCIRepository) { obj.Spec.Ignore = ptr.To("foo.txt") }, want: sreconcile.ResultSuccess, assertPaths: []string{ "latest.tar.gz", }, - afterFunc: func(g *WithT, obj *ociv1.OCIRepository) { + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { g.Expect(obj.Status.Artifact.Digest).To(Equal("sha256:9102e9c8626e48821a91a4963436f1673cd85f8fb3deb843c992f85b995c38ea")) }, assertConditions: []metav1.Condition{ @@ -1836,17 +2657,17 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { }, { name: "No status changes if artifact is already 
present", - artifact: &sourcev1.Artifact{ + artifact: &meta.Artifact{ Revision: "revision", }, targetPath: "testdata/oci/repository", want: sreconcile.ResultSuccess, - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{ Revision: "revision", } }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Revision: "revision", }, assertConditions: []metav1.Condition{ @@ -1856,18 +2677,18 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { { name: "Artifact already present, unobserved ignore, rebuild artifact", targetPath: "testdata/oci/repository", - artifact: &sourcev1.Artifact{ + artifact: &meta.Artifact{ Revision: "revision", }, - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "revision"} + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "revision"} obj.Spec.Ignore = ptr.To("aaa") }, want: sreconcile.ResultSuccess, assertPaths: []string{ "latest.tar.gz", }, - afterFunc: func(g *WithT, obj *ociv1.OCIRepository) { + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { g.Expect(*obj.Status.ObservedIgnore).To(Equal("aaa")) }, assertConditions: []metav1.Condition{ @@ -1877,18 +2698,18 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { { name: "Artifact already present, unobserved layer selector, rebuild artifact", targetPath: "testdata/oci/repository", - artifact: &sourcev1.Artifact{ + artifact: &meta.Artifact{ Revision: "revision", }, - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Spec.LayerSelector = &ociv1.OCILayerSelector{MediaType: "foo"} - obj.Status.Artifact = &sourcev1.Artifact{Revision: "revision"} + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{MediaType: "foo"} + obj.Status.Artifact = &meta.Artifact{Revision: "revision"} }, want: 
sreconcile.ResultSuccess, assertPaths: []string{ "latest.tar.gz", }, - afterFunc: func(g *WithT, obj *ociv1.OCIRepository) { + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { g.Expect(obj.Status.ObservedLayerSelector.MediaType).To(Equal("foo")) }, assertConditions: []metav1.Condition{ @@ -1898,24 +2719,24 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { { name: "Artifact already present, observed layer selector changed, rebuild artifact", targetPath: "testdata/oci/repository", - artifact: &sourcev1.Artifact{ + artifact: &meta.Artifact{ Revision: "revision", Path: "foo.txt", }, - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Spec.LayerSelector = &ociv1.OCILayerSelector{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerCopy, + Operation: sourcev1.OCILayerCopy, } - obj.Status.Artifact = &sourcev1.Artifact{Revision: "revision"} + obj.Status.Artifact = &meta.Artifact{Revision: "revision"} }, want: sreconcile.ResultSuccess, assertPaths: []string{ "latest.tar.gz", }, - afterFunc: func(g *WithT, obj *ociv1.OCIRepository) { + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { g.Expect(obj.Status.ObservedLayerSelector.MediaType).To(Equal("foo")) - g.Expect(obj.Status.ObservedLayerSelector.Operation).To(Equal(ociv1.OCILayerCopy)) + g.Expect(obj.Status.ObservedLayerSelector.Operation).To(Equal(sourcev1.OCILayerCopy)) }, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"), @@ -1924,18 +2745,18 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { { name: "Artifact already present, observed ignore and layer selector, up-to-date", targetPath: "testdata/oci/repository", - artifact: &sourcev1.Artifact{ + artifact: &meta.Artifact{ Revision: "revision", }, - beforeFunc: func(obj *ociv1.OCIRepository) { + beforeFunc: func(obj *sourcev1.OCIRepository) 
{ obj.Spec.Ignore = ptr.To("aaa") - obj.Spec.LayerSelector = &ociv1.OCILayerSelector{MediaType: "foo"} - obj.Status.Artifact = &sourcev1.Artifact{Revision: "revision"} + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{MediaType: "foo"} + obj.Status.Artifact = &meta.Artifact{Revision: "revision"} obj.Status.ObservedIgnore = ptr.To("aaa") - obj.Status.ObservedLayerSelector = &ociv1.OCILayerSelector{MediaType: "foo"} + obj.Status.ObservedLayerSelector = &sourcev1.OCILayerSelector{MediaType: "foo"} }, want: sreconcile.ResultSuccess, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Revision: "revision", }, assertConditions: []metav1.Condition{ @@ -1964,7 +2785,7 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -1979,7 +2800,7 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { _ = resetChmod(tt.targetPath, 0o755, 0o644) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "reconcile-artifact-", Generation: 1, @@ -1989,7 +2810,7 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { tt.beforeFunc(obj) } - artifact := &sourcev1.Artifact{} + artifact := &meta.Artifact{} if tt.artifact != nil { artifact = tt.artifact } @@ -2039,13 +2860,20 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { server.Close() }) - imgs, err := pushMultiplePodinfoImages(server.registryHost, true, "6.1.4", "6.1.5", "6.1.6") + imgs, err := pushMultiplePodinfoImages(server.registryHost, true, + "6.1.4", + "6.1.5-beta.1", + "6.1.5-rc.1", + "6.1.5", + "6.1.6-rc.1", + "6.1.6", + ) g.Expect(err).ToNot(HaveOccurred()) tests := []struct { name string url string - reference *ociv1.OCIRepositoryRef + reference *sourcev1.OCIRepositoryRef wantErr bool want 
string }{ @@ -2057,7 +2885,7 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { { name: "valid url with tag reference", url: "oci://ghcr.io/stefanprodan/charts", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.6", }, want: "ghcr.io/stefanprodan/charts:6.1.6", @@ -2065,7 +2893,7 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { { name: "valid url with digest reference", url: "oci://ghcr.io/stefanprodan/charts", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Digest: imgs["6.1.6"].digest.String(), }, want: "ghcr.io/stefanprodan/charts@" + imgs["6.1.6"].digest.String(), @@ -2073,7 +2901,7 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { { name: "valid url with semver reference", url: fmt.Sprintf("oci://%s/podinfo", server.registryHost), - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ SemVer: ">= 6.1.6", }, want: server.registryHost + "/podinfo:6.1.6", @@ -2083,11 +2911,29 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { url: "ghcr.io/stefanprodan/charts", wantErr: true, }, + { + name: "valid url with semver filter", + url: fmt.Sprintf("oci://%s/podinfo", server.registryHost), + reference: &sourcev1.OCIRepositoryRef{ + SemVer: ">= 6.1.x-0", + SemverFilter: ".*-rc.*", + }, + want: server.registryHost + "/podinfo:6.1.6-rc.1", + }, + { + name: "valid url with semver filter and unexisting version", + url: fmt.Sprintf("oci://%s/podinfo", server.registryHost), + reference: &sourcev1.OCIRepositoryRef{ + SemVer: ">= 6.1.x-0", + SemverFilter: ".*-alpha.*", + }, + wantErr: true, + }, } clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). 
- WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -2098,11 +2944,11 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "artifact-url-", }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: tt.url, Interval: metav1.Duration{Duration: interval}, Timeout: &metav1.Duration{Duration: timeout}, @@ -2126,19 +2972,19 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { } } -func TestOCIRepository_stalled(t *testing.T) { +func TestOCIRepository_invalidURL(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, "ocirepository-stalled-test") + ns, err := testEnv.CreateNamespace(ctx, "ocirepository-invalid-url-test") g.Expect(err).ToNot(HaveOccurred()) defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "ocirepository-reconcile", Namespace: ns.Name, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: "oci://ghcr.io/test/test:v1", Interval: metav1.Duration{Duration: 60 * time.Minute}, }, @@ -2147,7 +2993,7 @@ func TestOCIRepository_stalled(t *testing.T) { g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - resultobj := ociv1.OCIRepository{} + resultobj := sourcev1.OCIRepository{} // Wait for the object to fail g.Eventually(func() bool { @@ -2168,24 +3014,92 @@ func TestOCIRepository_stalled(t *testing.T) { g.Expect(stalledCondition.Reason).Should(Equal(sourcev1.URLInvalidReason)) } +func TestOCIRepository_objectLevelWorkloadIdentityFeatureGate(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, 
"ocirepository-olwifg-test") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + err = testEnv.Create(ctx, &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: "test", + }, + }) + g.Expect(err).NotTo(HaveOccurred()) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "ocirepository-reconcile", + Namespace: ns.Name, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: "oci://ghcr.io/stefanprodan/manifests/podinfo", + Interval: metav1.Duration{Duration: 60 * time.Minute}, + Provider: "aws", + ServiceAccountName: "test", + }, + } + + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + resultobj := &sourcev1.OCIRepository{} + + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, resultobj); err != nil { + return false + } + return conditions.IsStalled(resultobj) + }).Should(BeTrue()) + + stalledCondition := conditions.Get(resultobj, meta.StalledCondition) + g.Expect(stalledCondition).ToNot(BeNil()) + g.Expect(stalledCondition.Reason).Should(Equal(meta.FeatureGateDisabledReason)) + g.Expect(stalledCondition.Message).Should(Equal("to use spec.serviceAccountName for provider authentication please enable the ObjectLevelWorkloadIdentity feature gate in the controller")) + + auth.EnableObjectLevelWorkloadIdentity() + t.Cleanup(auth.DisableObjectLevelWorkloadIdentity) + + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, resultobj); err != nil { + return false + } + resultobj.Annotations = map[string]string{ + meta.ReconcileRequestAnnotation: time.Now().Format(time.RFC3339), + } + return testEnv.Update(ctx, resultobj) == nil + }).Should(BeTrue()) + + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, resultobj); err != nil { + return false + } + logOCIRepoStatus(t, resultobj) + return !conditions.IsReady(resultobj) && + conditions.GetReason(resultobj, 
meta.ReadyCondition) == sourcev1.AuthenticationFailedReason + }).Should(BeTrue()) +} + func TestOCIRepository_reconcileStorage(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *ociv1.OCIRepository, storage *Storage) error + beforeFunc func(obj *sourcev1.OCIRepository, storage *storage.Storage) error want sreconcile.Result wantErr bool assertConditions []metav1.Condition - assertArtifact *sourcev1.Artifact + assertArtifact *meta.Artifact assertPaths []string }{ { name: "garbage collects", - beforeFunc: func(obj *ociv1.OCIRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error { revisions := []string{"a", "b", "c", "d"} for n := range revisions { v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/oci-reconcile-storage/%s.txt", v), Revision: v, } @@ -2206,7 +3120,7 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") return nil }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/oci-reconcile-storage/d.txt", Revision: "d", Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", @@ -2234,8 +3148,8 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { }, { name: "notices missing artifact in storage", - beforeFunc: func(obj *ociv1.OCIRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/oci-reconcile-storage/invalid.txt", Revision: "e", } @@ -2253,10 +3167,10 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { }, { name: "notices empty artifact digest", - beforeFunc: func(obj *ociv1.OCIRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error { f := "empty-digest.txt" - 
obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/oci-reconcile-storage/%s.txt", f), Revision: "fake", } @@ -2284,10 +3198,10 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { }, { name: "notices artifact digest mismatch", - beforeFunc: func(obj *ociv1.OCIRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error { f := "digest-mismatch.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/oci-reconcile-storage/%s.txt", f), Revision: "fake", } @@ -2315,8 +3229,8 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { }, { name: "updates hostname on diff from current", - beforeFunc: func(obj *ociv1.OCIRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/oci-reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -2335,7 +3249,7 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { assertPaths: []string{ "/oci-reconcile-storage/hostname.txt", }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/oci-reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -2350,7 +3264,7 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). 
- WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -2363,7 +3277,7 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-", Generation: 1, @@ -2381,7 +3295,7 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { sp := patch.NewSerialPatcher(obj, r.Client) - got, err := r.reconcileStorage(ctx, sp, obj, &sourcev1.Artifact{}, "") + got, err := r.reconcileStorage(ctx, sp, obj, &meta.Artifact{}, "") if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { @@ -2422,7 +3336,7 @@ func TestOCIRepository_ReconcileDelete(t *testing.T) { patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), } - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ Name: "reconcile-delete-", DeletionTimestamp: &metav1.Time{Time: time.Now()}, @@ -2430,10 +3344,10 @@ func TestOCIRepository_ReconcileDelete(t *testing.T) { sourcev1.SourceFinalizer, }, }, - Status: ociv1.OCIRepositoryStatus{}, + Status: sourcev1.OCIRepositoryStatus{}, } - artifact := testStorage.NewArtifactFor(ociv1.OCIRepositoryKind, obj.GetObjectMeta(), "revision", "foo.txt") + artifact := testStorage.NewArtifactFor(sourcev1.OCIRepositoryKind, obj.GetObjectMeta(), "revision", "foo.txt") obj.Status.Artifact = &artifact got, err := r.reconcileDelete(ctx, obj) @@ -2452,8 +3366,8 @@ func TestOCIRepositoryReconciler_notify(t *testing.T) { name string res sreconcile.Result resErr error - oldObjBeforeFunc func(obj *ociv1.OCIRepository) - newObjBeforeFunc func(obj *ociv1.OCIRepository) + oldObjBeforeFunc func(obj *sourcev1.OCIRepository) + newObjBeforeFunc func(obj *sourcev1.OCIRepository) commit git.Commit wantEvent string }{ @@ -2466,9 +3380,9 @@ func TestOCIRepositoryReconciler_notify(t 
*testing.T) { name: "new artifact", res: sreconcile.ResultSuccess, resErr: nil, - newObjBeforeFunc: func(obj *ociv1.OCIRepository) { + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { obj.Spec.URL = "oci://newurl.io" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: "xxx", Digest: "yyy", Metadata: map[string]string{ @@ -2483,14 +3397,14 @@ func TestOCIRepositoryReconciler_notify(t *testing.T) { name: "recovery from failure", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, - newObjBeforeFunc: func(obj *ociv1.OCIRepository) { + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { obj.Spec.URL = "oci://newurl.io" - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal Succeeded stored artifact with revision 'xxx' from 'oci://newurl.io'", @@ -2499,14 +3413,14 @@ func TestOCIRepositoryReconciler_notify(t *testing.T) { name: "recovery and new artifact", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") 
}, - newObjBeforeFunc: func(obj *ociv1.OCIRepository) { + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { obj.Spec.URL = "oci://newurl.io" - obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Digest: "bbb"} + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal NewArtifact stored artifact with revision 'aaa' from 'oci://newurl.io'", @@ -2515,12 +3429,12 @@ func TestOCIRepositoryReconciler_notify(t *testing.T) { name: "no updates", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, - newObjBeforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, }, @@ -2528,8 +3442,8 @@ func TestOCIRepositoryReconciler_notify(t *testing.T) { name: "no updates on requeue", res: sreconcile.ResultRequeue, resErr: nil, - oldObjBeforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "ready") }, }, @@ -2540,7 +3454,7 @@ func TestOCIRepositoryReconciler_notify(t *testing.T) { g := NewWithT(t) recorder := record.NewFakeRecorder(32) - oldObj := &ociv1.OCIRepository{} + oldObj := &sourcev1.OCIRepository{} newObj 
:= oldObj.DeepCopy() if tt.oldObjBeforeFunc != nil { @@ -2668,112 +3582,112 @@ func setPodinfoImageAnnotations(img gcrv1.Image, tag string) gcrv1.Image { func TestOCIContentConfigChanged(t *testing.T) { tests := []struct { name string - spec ociv1.OCIRepositorySpec - status ociv1.OCIRepositoryStatus + spec sourcev1.OCIRepositorySpec + status sourcev1.OCIRepositoryStatus want bool }{ { name: "same ignore, no layer selector", - spec: ociv1.OCIRepositorySpec{ + spec: sourcev1.OCIRepositorySpec{ Ignore: ptr.To("nnn"), }, - status: ociv1.OCIRepositoryStatus{ + status: sourcev1.OCIRepositoryStatus{ ObservedIgnore: ptr.To("nnn"), }, want: false, }, { name: "different ignore, no layer selector", - spec: ociv1.OCIRepositorySpec{ + spec: sourcev1.OCIRepositorySpec{ Ignore: ptr.To("nnn"), }, - status: ociv1.OCIRepositoryStatus{ + status: sourcev1.OCIRepositoryStatus{ ObservedIgnore: ptr.To("mmm"), }, want: true, }, { name: "same ignore, same layer selector", - spec: ociv1.OCIRepositorySpec{ + spec: sourcev1.OCIRepositorySpec{ Ignore: ptr.To("nnn"), - LayerSelector: &ociv1.OCILayerSelector{ + LayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, - status: ociv1.OCIRepositoryStatus{ + status: sourcev1.OCIRepositoryStatus{ ObservedIgnore: ptr.To("nnn"), - ObservedLayerSelector: &ociv1.OCILayerSelector{ + ObservedLayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, want: false, }, { name: "same ignore, different layer selector operation", - spec: ociv1.OCIRepositorySpec{ + spec: sourcev1.OCIRepositorySpec{ Ignore: ptr.To("nnn"), - LayerSelector: &ociv1.OCILayerSelector{ + LayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerCopy, + Operation: sourcev1.OCILayerCopy, }, }, - status: ociv1.OCIRepositoryStatus{ + status: sourcev1.OCIRepositoryStatus{ ObservedIgnore: 
ptr.To("nnn"), - ObservedLayerSelector: &ociv1.OCILayerSelector{ + ObservedLayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, want: true, }, { name: "same ignore, different layer selector mediatype", - spec: ociv1.OCIRepositorySpec{ + spec: sourcev1.OCIRepositorySpec{ Ignore: ptr.To("nnn"), - LayerSelector: &ociv1.OCILayerSelector{ + LayerSelector: &sourcev1.OCILayerSelector{ MediaType: "bar", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, - status: ociv1.OCIRepositoryStatus{ + status: sourcev1.OCIRepositoryStatus{ ObservedIgnore: ptr.To("nnn"), - ObservedLayerSelector: &ociv1.OCILayerSelector{ + ObservedLayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, want: true, }, { name: "no ignore, same layer selector", - spec: ociv1.OCIRepositorySpec{ - LayerSelector: &ociv1.OCILayerSelector{ + spec: sourcev1.OCIRepositorySpec{ + LayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, - status: ociv1.OCIRepositoryStatus{ - ObservedLayerSelector: &ociv1.OCILayerSelector{ + status: sourcev1.OCIRepositoryStatus{ + ObservedLayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, want: false, }, { name: "no ignore, different layer selector", - spec: ociv1.OCIRepositorySpec{ - LayerSelector: &ociv1.OCILayerSelector{ + spec: sourcev1.OCIRepositorySpec{ + LayerSelector: &sourcev1.OCILayerSelector{ MediaType: "bar", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, - status: ociv1.OCIRepositoryStatus{ - ObservedLayerSelector: &ociv1.OCILayerSelector{ + status: sourcev1.OCIRepositoryStatus{ + ObservedLayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: 
ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, want: true, @@ -2784,7 +3698,7 @@ func TestOCIContentConfigChanged(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ Spec: tt.spec, Status: tt.status, } diff --git a/internal/controller/storage.go b/internal/controller/storage.go deleted file mode 100644 index af4b79a70..000000000 --- a/internal/controller/storage.go +++ /dev/null @@ -1,719 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "archive/tar" - "compress/gzip" - "context" - "fmt" - "io" - "io/fs" - "net/url" - "os" - "path/filepath" - "sort" - "strings" - "time" - - securejoin "github.com/cyphar/filepath-securejoin" - "github.com/go-git/go-git/v5/plumbing/format/gitignore" - "github.com/opencontainers/go-digest" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kerrors "k8s.io/apimachinery/pkg/util/errors" - - "github.com/fluxcd/pkg/lockedfile" - "github.com/fluxcd/pkg/sourceignore" - pkgtar "github.com/fluxcd/pkg/tar" - - v1 "github.com/fluxcd/source-controller/api/v1" - intdigest "github.com/fluxcd/source-controller/internal/digest" - sourcefs "github.com/fluxcd/source-controller/internal/fs" -) - -const GarbageCountLimit = 1000 - -const ( - // defaultFileMode is the permission mode applied to files inside an artifact archive. - defaultFileMode int64 = 0o600 - // defaultDirMode is the permission mode applied to all directories inside an artifact archive. - defaultDirMode int64 = 0o750 - // defaultExeFileMode is the permission mode applied to executable files inside an artifact archive. - defaultExeFileMode int64 = 0o700 -) - -// Storage manages artifacts -type Storage struct { - // BasePath is the local directory path where the source artifacts are stored. - BasePath string `json:"basePath"` - - // Hostname is the file server host name used to compose the artifacts URIs. - Hostname string `json:"hostname"` - - // ArtifactRetentionTTL is the duration of time that artifacts will be kept - // in storage before being garbage collected. - ArtifactRetentionTTL time.Duration `json:"artifactRetentionTTL"` - - // ArtifactRetentionRecords is the maximum number of artifacts to be kept in - // storage after a garbage collection. 
- ArtifactRetentionRecords int `json:"artifactRetentionRecords"` -} - -// NewStorage creates the storage helper for a given path and hostname. -func NewStorage(basePath string, hostname string, artifactRetentionTTL time.Duration, artifactRetentionRecords int) (*Storage, error) { - if f, err := os.Stat(basePath); os.IsNotExist(err) || !f.IsDir() { - return nil, fmt.Errorf("invalid dir path: %s", basePath) - } - return &Storage{ - BasePath: basePath, - Hostname: hostname, - ArtifactRetentionTTL: artifactRetentionTTL, - ArtifactRetentionRecords: artifactRetentionRecords, - }, nil -} - -// NewArtifactFor returns a new v1.Artifact. -func (s Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, fileName string) v1.Artifact { - path := v1.ArtifactPath(kind, metadata.GetNamespace(), metadata.GetName(), fileName) - artifact := v1.Artifact{ - Path: path, - Revision: revision, - } - s.SetArtifactURL(&artifact) - return artifact -} - -// SetArtifactURL sets the URL on the given v1.Artifact. -func (s Storage) SetArtifactURL(artifact *v1.Artifact) { - if artifact.Path == "" { - return - } - format := "http://%s/%s" - if strings.HasPrefix(s.Hostname, "http://") || strings.HasPrefix(s.Hostname, "https://") { - format = "%s/%s" - } - artifact.URL = fmt.Sprintf(format, s.Hostname, strings.TrimLeft(artifact.Path, "/")) -} - -// SetHostname sets the hostname of the given URL string to the current Storage.Hostname and returns the result. -func (s Storage) SetHostname(URL string) string { - u, err := url.Parse(URL) - if err != nil { - return "" - } - u.Host = s.Hostname - return u.String() -} - -// MkdirAll calls os.MkdirAll for the given v1.Artifact base dir. -func (s Storage) MkdirAll(artifact v1.Artifact) error { - dir := filepath.Dir(s.LocalPath(artifact)) - return os.MkdirAll(dir, 0o700) -} - -// Remove calls os.Remove for the given v1.Artifact path. 
-func (s Storage) Remove(artifact v1.Artifact) error { - return os.Remove(s.LocalPath(artifact)) -} - -// RemoveAll calls os.RemoveAll for the given v1.Artifact base dir. -func (s Storage) RemoveAll(artifact v1.Artifact) (string, error) { - var deletedDir string - dir := filepath.Dir(s.LocalPath(artifact)) - // Check if the dir exists. - _, err := os.Stat(dir) - if err == nil { - deletedDir = dir - } - return deletedDir, os.RemoveAll(dir) -} - -// RemoveAllButCurrent removes all files for the given v1.Artifact base dir, excluding the current one. -func (s Storage) RemoveAllButCurrent(artifact v1.Artifact) ([]string, error) { - deletedFiles := []string{} - localPath := s.LocalPath(artifact) - dir := filepath.Dir(localPath) - var errors []string - _ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - errors = append(errors, err.Error()) - return nil - } - - if path != localPath && !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink { - if err := os.Remove(path); err != nil { - errors = append(errors, info.Name()) - } else { - // Collect the successfully deleted file paths. - deletedFiles = append(deletedFiles, path) - } - } - return nil - }) - - if len(errors) > 0 { - return deletedFiles, fmt.Errorf("failed to remove files: %s", strings.Join(errors, " ")) - } - return deletedFiles, nil -} - -// getGarbageFiles returns all files that need to be garbage collected for the given artifact. -// Garbage files are determined based on the below flow: -// 1. collect all artifact files with an expired ttl -// 2. if we satisfy maxItemsToBeRetained, then return -// 3. 
else, collect all artifact files till the latest n files remain, where n=maxItemsToBeRetained -func (s Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItemsToBeRetained int, ttl time.Duration) (garbageFiles []string, _ error) { - localPath := s.LocalPath(artifact) - dir := filepath.Dir(localPath) - artifactFilesWithCreatedTs := make(map[time.Time]string) - // sortedPaths contain all files sorted according to their created ts. - sortedPaths := []string{} - now := time.Now().UTC() - totalArtifactFiles := 0 - var errors []string - creationTimestamps := []time.Time{} - _ = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { - if err != nil { - errors = append(errors, err.Error()) - return nil - } - if totalArtifactFiles >= totalCountLimit { - return fmt.Errorf("reached file walking limit, already walked over: %d", totalArtifactFiles) - } - info, err := d.Info() - if err != nil { - errors = append(errors, err.Error()) - return nil - } - createdAt := info.ModTime().UTC() - diff := now.Sub(createdAt) - // Compare the time difference between now and the time at which the file was created - // with the provided TTL. Delete if the difference is greater than the TTL. Since the - // below logic just deals with determining if an artifact needs to be garbage collected, - // we avoid all lock files, adding them at the end to the list of garbage files. - expired := diff > ttl - if !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink && filepath.Ext(path) != ".lock" { - if path != localPath && expired { - garbageFiles = append(garbageFiles, path) - } - totalArtifactFiles += 1 - artifactFilesWithCreatedTs[createdAt] = path - creationTimestamps = append(creationTimestamps, createdAt) - } - return nil - - }) - if len(errors) > 0 { - return nil, fmt.Errorf("can't walk over file: %s", strings.Join(errors, ",")) - } - - // We already collected enough garbage files to satisfy the no. 
of max - // items that are supposed to be retained, so exit early. - if totalArtifactFiles-len(garbageFiles) < maxItemsToBeRetained { - return garbageFiles, nil - } - - // sort all timestamps in ascending order. - sort.Slice(creationTimestamps, func(i, j int) bool { return creationTimestamps[i].Before(creationTimestamps[j]) }) - for _, ts := range creationTimestamps { - path, ok := artifactFilesWithCreatedTs[ts] - if !ok { - return garbageFiles, fmt.Errorf("failed to fetch file for created ts: %v", ts) - } - sortedPaths = append(sortedPaths, path) - } - - var collected int - noOfGarbageFiles := len(garbageFiles) - for _, path := range sortedPaths { - if path != localPath && filepath.Ext(path) != ".lock" && !stringInSlice(path, garbageFiles) { - // If we previously collected some garbage files with an expired ttl, then take that into account - // when checking whether we need to remove more files to satisfy the max no. of items allowed - // in the filesystem, along with the no. of files already removed in this loop. - if noOfGarbageFiles > 0 { - if (len(sortedPaths) - collected - len(garbageFiles)) > maxItemsToBeRetained { - garbageFiles = append(garbageFiles, path) - collected += 1 - } - } else { - if len(sortedPaths)-collected > maxItemsToBeRetained { - garbageFiles = append(garbageFiles, path) - collected += 1 - } - } - } - } - - return garbageFiles, nil -} - -// GarbageCollect removes all garbage files in the artifact dir according to the provided -// retention options. -func (s Storage) GarbageCollect(ctx context.Context, artifact v1.Artifact, timeout time.Duration) ([]string, error) { - delFilesChan := make(chan []string) - errChan := make(chan error) - // Abort if it takes more than the provided timeout duration. 
- ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - go func() { - garbageFiles, err := s.getGarbageFiles(artifact, GarbageCountLimit, s.ArtifactRetentionRecords, s.ArtifactRetentionTTL) - if err != nil { - errChan <- err - return - } - var errors []error - var deleted []string - if len(garbageFiles) > 0 { - for _, file := range garbageFiles { - err := os.Remove(file) - if err != nil { - errors = append(errors, err) - } else { - deleted = append(deleted, file) - } - // If a lock file exists for this garbage artifact, remove that too. - lockFile := file + ".lock" - if _, err = os.Lstat(lockFile); err == nil { - err = os.Remove(lockFile) - if err != nil { - errors = append(errors, err) - } - } - } - } - if len(errors) > 0 { - errChan <- kerrors.NewAggregate(errors) - return - } - delFilesChan <- deleted - }() - - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case delFiles := <-delFilesChan: - return delFiles, nil - case err := <-errChan: - return nil, err - } - } -} - -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - -// ArtifactExist returns a boolean indicating whether the v1.Artifact exists in storage and is a regular file. -func (s Storage) ArtifactExist(artifact v1.Artifact) bool { - fi, err := os.Lstat(s.LocalPath(artifact)) - if err != nil { - return false - } - return fi.Mode().IsRegular() -} - -// VerifyArtifact verifies if the Digest of the v1.Artifact matches the digest -// of the file in Storage. It returns an error if the digests don't match, or -// if it can't be verified. 
-func (s Storage) VerifyArtifact(artifact v1.Artifact) error { - if artifact.Digest == "" { - return fmt.Errorf("artifact has no digest") - } - - d, err := digest.Parse(artifact.Digest) - if err != nil { - return fmt.Errorf("failed to parse artifact digest '%s': %w", artifact.Digest, err) - } - - f, err := os.Open(s.LocalPath(artifact)) - if err != nil { - return err - } - defer f.Close() - - verifier := d.Verifier() - if _, err = io.Copy(verifier, f); err != nil { - return err - } - if !verifier.Verified() { - return fmt.Errorf("computed digest doesn't match '%s'", d.String()) - } - return nil -} - -// ArchiveFileFilter must return true if a file should not be included in the archive after inspecting the given path -// and/or os.FileInfo. -type ArchiveFileFilter func(p string, fi os.FileInfo) bool - -// SourceIgnoreFilter returns an ArchiveFileFilter that filters out files matching sourceignore.VCSPatterns and any of -// the provided patterns. -// If an empty gitignore.Pattern slice is given, the matcher is set to sourceignore.NewDefaultMatcher. -func SourceIgnoreFilter(ps []gitignore.Pattern, domain []string) ArchiveFileFilter { - matcher := sourceignore.NewDefaultMatcher(ps, domain) - if len(ps) > 0 { - ps = append(sourceignore.VCSPatterns(domain), ps...) - matcher = sourceignore.NewMatcher(ps) - } - return func(p string, fi os.FileInfo) bool { - return matcher.Match(strings.Split(p, string(filepath.Separator)), fi.IsDir()) - } -} - -// Archive atomically archives the given directory as a tarball to the given v1.Artifact path, excluding -// directories and any ArchiveFileFilter matches. While archiving, any environment specific data (for example, -// the user and group name) is stripped from file headers. -// If successful, it sets the digest and last update time on the artifact. 
-func (s Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFilter) (err error) { - if f, err := os.Stat(dir); os.IsNotExist(err) || !f.IsDir() { - return fmt.Errorf("invalid dir path: %s", dir) - } - - localPath := s.LocalPath(*artifact) - tf, err := os.CreateTemp(filepath.Split(localPath)) - if err != nil { - return err - } - tmpName := tf.Name() - defer func() { - if err != nil { - os.Remove(tmpName) - } - }() - - d := intdigest.Canonical.Digester() - sz := &writeCounter{} - mw := io.MultiWriter(d.Hash(), tf, sz) - - gw := gzip.NewWriter(mw) - tw := tar.NewWriter(gw) - if err := filepath.Walk(dir, func(p string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - // Ignore anything that is not a file or directories e.g. symlinks - if m := fi.Mode(); !(m.IsRegular() || m.IsDir()) { - return nil - } - - // Skip filtered files - if filter != nil && filter(p, fi) { - return nil - } - - header, err := tar.FileInfoHeader(fi, p) - if err != nil { - return err - } - - // The name needs to be modified to maintain directory structure - // as tar.FileInfoHeader only has access to the base name of the file. 
- // Ref: https://golang.org/src/archive/tar/common.go?#L626 - relFilePath := p - if filepath.IsAbs(dir) { - relFilePath, err = filepath.Rel(dir, p) - if err != nil { - return err - } - } - sanitizeHeader(relFilePath, header) - - if err := tw.WriteHeader(header); err != nil { - return err - } - - if !fi.Mode().IsRegular() { - return nil - } - f, err := os.Open(p) - if err != nil { - f.Close() - return err - } - if _, err := io.Copy(tw, f); err != nil { - f.Close() - return err - } - return f.Close() - }); err != nil { - tw.Close() - gw.Close() - tf.Close() - return err - } - - if err := tw.Close(); err != nil { - gw.Close() - tf.Close() - return err - } - if err := gw.Close(); err != nil { - tf.Close() - return err - } - if err := tf.Close(); err != nil { - return err - } - - if err := os.Chmod(tmpName, 0o600); err != nil { - return err - } - - if err := sourcefs.RenameWithFallback(tmpName, localPath); err != nil { - return err - } - - artifact.Digest = d.Digest().String() - artifact.LastUpdateTime = metav1.Now() - artifact.Size = &sz.written - - return nil -} - -// AtomicWriteFile atomically writes the io.Reader contents to the v1.Artifact path. -// If successful, it sets the digest and last update time on the artifact. 
-func (s Storage) AtomicWriteFile(artifact *v1.Artifact, reader io.Reader, mode os.FileMode) (err error) { - localPath := s.LocalPath(*artifact) - tf, err := os.CreateTemp(filepath.Split(localPath)) - if err != nil { - return err - } - tfName := tf.Name() - defer func() { - if err != nil { - os.Remove(tfName) - } - }() - - d := intdigest.Canonical.Digester() - sz := &writeCounter{} - mw := io.MultiWriter(tf, d.Hash(), sz) - - if _, err := io.Copy(mw, reader); err != nil { - tf.Close() - return err - } - if err := tf.Close(); err != nil { - return err - } - - if err := os.Chmod(tfName, mode); err != nil { - return err - } - - if err := sourcefs.RenameWithFallback(tfName, localPath); err != nil { - return err - } - - artifact.Digest = d.Digest().String() - artifact.LastUpdateTime = metav1.Now() - artifact.Size = &sz.written - - return nil -} - -// Copy atomically copies the io.Reader contents to the v1.Artifact path. -// If successful, it sets the digest and last update time on the artifact. -func (s Storage) Copy(artifact *v1.Artifact, reader io.Reader) (err error) { - localPath := s.LocalPath(*artifact) - tf, err := os.CreateTemp(filepath.Split(localPath)) - if err != nil { - return err - } - tfName := tf.Name() - defer func() { - if err != nil { - os.Remove(tfName) - } - }() - - d := intdigest.Canonical.Digester() - sz := &writeCounter{} - mw := io.MultiWriter(tf, d.Hash(), sz) - - if _, err := io.Copy(mw, reader); err != nil { - tf.Close() - return err - } - if err := tf.Close(); err != nil { - return err - } - - if err := sourcefs.RenameWithFallback(tfName, localPath); err != nil { - return err - } - - artifact.Digest = d.Digest().String() - artifact.LastUpdateTime = metav1.Now() - artifact.Size = &sz.written - - return nil -} - -// CopyFromPath atomically copies the contents of the given path to the path of the v1.Artifact. -// If successful, the digest and last update time on the artifact is set. 
-func (s Storage) CopyFromPath(artifact *v1.Artifact, path string) (err error) { - f, err := os.Open(path) - if err != nil { - return err - } - defer func() { - if cerr := f.Close(); cerr != nil && err == nil { - err = cerr - } - }() - err = s.Copy(artifact, f) - return err -} - -// CopyToPath copies the contents in the (sub)path of the given artifact to the given path. -func (s Storage) CopyToPath(artifact *v1.Artifact, subPath, toPath string) error { - // create a tmp directory to store artifact - tmp, err := os.MkdirTemp("", "flux-include-") - if err != nil { - return err - } - defer os.RemoveAll(tmp) - - // read artifact file content - localPath := s.LocalPath(*artifact) - f, err := os.Open(localPath) - if err != nil { - return err - } - defer f.Close() - - // untar the artifact - untarPath := filepath.Join(tmp, "unpack") - if err = pkgtar.Untar(f, untarPath, pkgtar.WithMaxUntarSize(-1)); err != nil { - return err - } - - // create the destination parent dir - if err = os.MkdirAll(filepath.Dir(toPath), os.ModePerm); err != nil { - return err - } - - // copy the artifact content to the destination dir - fromPath, err := securejoin.SecureJoin(untarPath, subPath) - if err != nil { - return err - } - if err := sourcefs.RenameWithFallback(fromPath, toPath); err != nil { - return err - } - return nil -} - -// Symlink creates or updates a symbolic link for the given v1.Artifact and returns the URL for the symlink. 
-func (s Storage) Symlink(artifact v1.Artifact, linkName string) (string, error) { - localPath := s.LocalPath(artifact) - dir := filepath.Dir(localPath) - link := filepath.Join(dir, linkName) - tmpLink := link + ".tmp" - - if err := os.Remove(tmpLink); err != nil && !os.IsNotExist(err) { - return "", err - } - - if err := os.Symlink(localPath, tmpLink); err != nil { - return "", err - } - - if err := os.Rename(tmpLink, link); err != nil { - return "", err - } - - return fmt.Sprintf("http://%s/%s", s.Hostname, filepath.Join(filepath.Dir(artifact.Path), linkName)), nil -} - -// Lock creates a file lock for the given v1.Artifact. -func (s Storage) Lock(artifact v1.Artifact) (unlock func(), err error) { - lockFile := s.LocalPath(artifact) + ".lock" - mutex := lockedfile.MutexAt(lockFile) - return mutex.Lock() -} - -// LocalPath returns the secure local path of the given artifact (that is: relative to the Storage.BasePath). -func (s Storage) LocalPath(artifact v1.Artifact) string { - if artifact.Path == "" { - return "" - } - path, err := securejoin.SecureJoin(s.BasePath, artifact.Path) - if err != nil { - return "" - } - return path -} - -// writeCounter is an implementation of io.Writer that only records the number -// of bytes written. -type writeCounter struct { - written int64 -} - -func (wc *writeCounter) Write(p []byte) (int, error) { - n := len(p) - wc.written += int64(n) - return n, nil -} - -// sanitizeHeader modifies the tar.Header to be relative to the root of the -// archive and removes any environment specific data. -func sanitizeHeader(relP string, h *tar.Header) { - // Modify the name to be relative to the root of the archive, - // this ensures we maintain the same structure when extracting. - h.Name = relP - - // We want to remove any environment specific data as well, this - // ensures the checksum is purely content based. 
- h.Gid = 0 - h.Uid = 0 - h.Uname = "" - h.Gname = "" - h.ModTime = time.Time{} - h.AccessTime = time.Time{} - h.ChangeTime = time.Time{} - - // Override the mode to be the default for the type of file. - setDefaultMode(h) -} - -// setDefaultMode sets the default mode for the given header. -func setDefaultMode(h *tar.Header) { - if h.FileInfo().IsDir() { - h.Mode = defaultDirMode - return - } - - if h.FileInfo().Mode().IsRegular() { - mode := h.FileInfo().Mode() - if mode&os.ModeType == 0 && mode&0o111 != 0 { - h.Mode = defaultExeFileMode - return - } - h.Mode = defaultFileMode - return - } -} diff --git a/internal/controller/storage_test.go b/internal/controller/storage_test.go deleted file mode 100644 index 1b65ce914..000000000 --- a/internal/controller/storage_test.go +++ /dev/null @@ -1,853 +0,0 @@ -/* -Copyright 2020, 2021 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "context" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/gitignore" - . 
"github.com/onsi/gomega" - - sourcev1 "github.com/fluxcd/source-controller/api/v1" -) - -func TestStorageConstructor(t *testing.T) { - dir := t.TempDir() - - if _, err := NewStorage("/nonexistent", "hostname", time.Minute, 2); err == nil { - t.Fatal("nonexistent path was allowable in storage constructor") - } - - f, err := os.CreateTemp(dir, "") - if err != nil { - t.Fatalf("while creating temporary file: %v", err) - } - f.Close() - - if _, err := NewStorage(f.Name(), "hostname", time.Minute, 2); err == nil { - os.Remove(f.Name()) - t.Fatal("file path was accepted as basedir") - } - os.Remove(f.Name()) - - if _, err := NewStorage(dir, "hostname", time.Minute, 2); err != nil { - t.Fatalf("Valid path did not successfully return: %v", err) - } -} - -// walks a tar.gz and looks for paths with the basename. It does not match -// symlinks properly at this time because that's painful. -func walkTar(tarFile string, match string, dir bool) (int64, int64, bool, error) { - f, err := os.Open(tarFile) - if err != nil { - return 0, 0, false, fmt.Errorf("could not open file: %w", err) - } - defer f.Close() - - gzr, err := gzip.NewReader(f) - if err != nil { - return 0, 0, false, fmt.Errorf("could not unzip file: %w", err) - } - defer gzr.Close() - - tr := tar.NewReader(gzr) - for { - header, err := tr.Next() - if err == io.EOF { - break - } else if err != nil { - return 0, 0, false, fmt.Errorf("corrupt tarball reading header: %w", err) - } - - switch header.Typeflag { - case tar.TypeDir: - if header.Name == match && dir { - return 0, header.Mode, true, nil - } - case tar.TypeReg: - if header.Name == match { - return header.Size, header.Mode, true, nil - } - default: - // skip - } - } - - return 0, 0, false, nil -} - -func TestStorage_Archive(t *testing.T) { - dir := t.TempDir() - - storage, err := NewStorage(dir, "hostname", time.Minute, 2) - if err != nil { - t.Fatalf("error while bootstrapping storage: %v", err) - } - - type dummyFile struct { - 
content []byte - mode int64 - } - - createFiles := func(files map[string]dummyFile) (dir string, err error) { - dir = t.TempDir() - for name, df := range files { - absPath := filepath.Join(dir, name) - if err = os.MkdirAll(filepath.Dir(absPath), 0o750); err != nil { - return - } - f, err := os.Create(absPath) - if err != nil { - return "", fmt.Errorf("could not create file %q: %w", absPath, err) - } - if n, err := f.Write(df.content); err != nil { - f.Close() - return "", fmt.Errorf("could not write %d bytes to file %q: %w", n, f.Name(), err) - } - f.Close() - - if df.mode != 0 { - if err = os.Chmod(absPath, os.FileMode(df.mode)); err != nil { - return "", fmt.Errorf("could not chmod file %q: %w", absPath, err) - } - } - } - return - } - - matchFiles := func(t *testing.T, storage *Storage, artifact sourcev1.Artifact, files map[string]dummyFile, dirs []string) { - t.Helper() - for name, df := range files { - mustExist := !(name[0:1] == "!") - if !mustExist { - name = name[1:] - } - s, m, exist, err := walkTar(storage.LocalPath(artifact), name, false) - if err != nil { - t.Fatalf("failed reading tarball: %v", err) - } - if bs := int64(len(df.content)); s != bs { - t.Fatalf("%q size %v != %v", name, s, bs) - } - if exist != mustExist { - if mustExist { - t.Errorf("could not find file %q in tarball", name) - } else { - t.Errorf("tarball contained excluded file %q", name) - } - } - expectMode := df.mode - if expectMode == 0 { - expectMode = defaultFileMode - } - if exist && m != expectMode { - t.Fatalf("%q mode %v != %v", name, m, expectMode) - } - } - for _, name := range dirs { - mustExist := !(name[0:1] == "!") - if !mustExist { - name = name[1:] - } - _, m, exist, err := walkTar(storage.LocalPath(artifact), name, true) - if err != nil { - t.Fatalf("failed reading tarball: %v", err) - } - if exist != mustExist { - if mustExist { - t.Errorf("could not find dir %q in tarball", name) - } else { - t.Errorf("tarball contained excluded file %q", name) - } - } - if exist && 
m != defaultDirMode { - t.Fatalf("%q mode %v != %v", name, m, defaultDirMode) - } - - } - } - - tests := []struct { - name string - files map[string]dummyFile - filter ArchiveFileFilter - want map[string]dummyFile - wantDirs []string - wantErr bool - }{ - { - name: "no filter", - files: map[string]dummyFile{ - ".git/config": {}, - "file.jpg": {content: []byte(`contents`)}, - "manifest.yaml": {}, - }, - filter: nil, - want: map[string]dummyFile{ - ".git/config": {}, - "file.jpg": {content: []byte(`contents`)}, - "manifest.yaml": {}, - }, - }, - { - name: "exclude VCS", - files: map[string]dummyFile{ - ".git/config": {}, - "manifest.yaml": {}, - }, - wantDirs: []string{ - "!.git", - }, - filter: SourceIgnoreFilter(nil, nil), - want: map[string]dummyFile{ - "!.git/config": {}, - "manifest.yaml": {}, - }, - }, - { - name: "custom", - files: map[string]dummyFile{ - ".git/config": {}, - "custom": {}, - "horse.jpg": {}, - }, - filter: SourceIgnoreFilter([]gitignore.Pattern{ - gitignore.ParsePattern("custom", nil), - }, nil), - want: map[string]dummyFile{ - "!git/config": {}, - "!custom": {}, - "horse.jpg": {}, - }, - wantErr: false, - }, - { - name: "including directories", - files: map[string]dummyFile{ - "test/.gitkeep": {}, - }, - filter: SourceIgnoreFilter([]gitignore.Pattern{ - gitignore.ParsePattern("custom", nil), - }, nil), - wantDirs: []string{ - "test", - }, - wantErr: false, - }, - { - name: "sets default file modes", - files: map[string]dummyFile{ - "test/file": { - mode: 0o666, - }, - "test/executable": { - mode: 0o777, - }, - }, - want: map[string]dummyFile{ - "test/file": { - mode: defaultFileMode, - }, - "test/executable": { - mode: defaultExeFileMode, - }, - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - dir, err := createFiles(tt.files) - if err != nil { - t.Error(err) - return - } - defer os.RemoveAll(dir) - artifact := sourcev1.Artifact{ - Path: filepath.Join(randStringRunes(10), randStringRunes(10), 
randStringRunes(10)+".tar.gz"), - } - if err := storage.MkdirAll(artifact); err != nil { - t.Fatalf("artifact directory creation failed: %v", err) - } - if err := storage.Archive(&artifact, dir, tt.filter); (err != nil) != tt.wantErr { - t.Errorf("Archive() error = %v, wantErr %v", err, tt.wantErr) - } - matchFiles(t, storage, artifact, tt.want, tt.wantDirs) - }) - } -} - -func TestStorage_Remove(t *testing.T) { - t.Run("removes file", func(t *testing.T) { - g := NewWithT(t) - - dir := t.TempDir() - - s, err := NewStorage(dir, "", 0, 0) - g.Expect(err).ToNot(HaveOccurred()) - - artifact := sourcev1.Artifact{ - Path: filepath.Join(dir, "test.txt"), - } - g.Expect(s.MkdirAll(artifact)).To(Succeed()) - g.Expect(s.AtomicWriteFile(&artifact, bytes.NewReader([]byte("test")), 0o600)).To(Succeed()) - g.Expect(s.ArtifactExist(artifact)).To(BeTrue()) - - g.Expect(s.Remove(artifact)).To(Succeed()) - g.Expect(s.ArtifactExist(artifact)).To(BeFalse()) - }) - - t.Run("error if file does not exist", func(t *testing.T) { - g := NewWithT(t) - - dir := t.TempDir() - - s, err := NewStorage(dir, "", 0, 0) - g.Expect(err).ToNot(HaveOccurred()) - - artifact := sourcev1.Artifact{ - Path: filepath.Join(dir, "test.txt"), - } - - err = s.Remove(artifact) - g.Expect(err).To(HaveOccurred()) - g.Expect(errors.Is(err, os.ErrNotExist)).To(BeTrue()) - }) -} - -func TestStorageRemoveAllButCurrent(t *testing.T) { - t.Run("bad directory in archive", func(t *testing.T) { - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", time.Minute, 2) - if err != nil { - t.Fatalf("Valid path did not successfully return: %v", err) - } - - if _, err := s.RemoveAllButCurrent(sourcev1.Artifact{Path: filepath.Join(dir, "really", "nonexistent")}); err == nil { - t.Fatal("Did not error while pruning non-existent path") - } - }) - - t.Run("collect names of deleted items", func(t *testing.T) { - g := NewWithT(t) - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", time.Minute, 2) - 
g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") - - artifact := sourcev1.Artifact{ - Path: filepath.Join("foo", "bar", "artifact1.tar.gz"), - } - - // Create artifact dir and artifacts. - artifactDir := filepath.Join(dir, "foo", "bar") - g.Expect(os.MkdirAll(artifactDir, 0o750)).NotTo(HaveOccurred()) - current := []string{ - filepath.Join(artifactDir, "artifact1.tar.gz"), - } - wantDeleted := []string{ - filepath.Join(artifactDir, "file1.txt"), - filepath.Join(artifactDir, "file2.txt"), - } - createFile := func(files []string) { - for _, c := range files { - f, err := os.Create(c) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(f.Close()).ToNot(HaveOccurred()) - } - } - createFile(current) - createFile(wantDeleted) - _, err = s.Symlink(artifact, "latest.tar.gz") - g.Expect(err).ToNot(HaveOccurred(), "failed to create symlink") - - deleted, err := s.RemoveAllButCurrent(artifact) - g.Expect(err).ToNot(HaveOccurred(), "failed to remove all but current") - g.Expect(deleted).To(Equal(wantDeleted)) - }) -} - -func TestStorageRemoveAll(t *testing.T) { - tests := []struct { - name string - artifactPath string - createArtifactPath bool - wantDeleted string - }{ - { - name: "delete non-existent path", - artifactPath: filepath.Join("foo", "bar", "artifact1.tar.gz"), - createArtifactPath: false, - wantDeleted: "", - }, - { - name: "delete existing path", - artifactPath: filepath.Join("foo", "bar", "artifact1.tar.gz"), - createArtifactPath: true, - wantDeleted: filepath.Join("foo", "bar"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", time.Minute, 2) - g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") - - artifact := sourcev1.Artifact{ - Path: tt.artifactPath, - } - - if tt.createArtifactPath { - g.Expect(os.MkdirAll(filepath.Join(dir, tt.artifactPath), 0o750)).ToNot(HaveOccurred()) - } - - deleted, err := s.RemoveAll(artifact) 
- g.Expect(err).ToNot(HaveOccurred()) - g.Expect(deleted).To(ContainSubstring(tt.wantDeleted), "unexpected deleted path") - }) - } -} - -func TestStorageCopyFromPath(t *testing.T) { - type File struct { - Name string - Content []byte - } - - dir := t.TempDir() - - storage, err := NewStorage(dir, "hostname", time.Minute, 2) - if err != nil { - t.Fatalf("error while bootstrapping storage: %v", err) - } - - createFile := func(file *File) (absPath string, err error) { - dir = t.TempDir() - absPath = filepath.Join(dir, file.Name) - if err = os.MkdirAll(filepath.Dir(absPath), 0o750); err != nil { - return - } - f, err := os.Create(absPath) - if err != nil { - return "", fmt.Errorf("could not create file %q: %w", absPath, err) - } - if n, err := f.Write(file.Content); err != nil { - f.Close() - return "", fmt.Errorf("could not write %d bytes to file %q: %w", n, f.Name(), err) - } - f.Close() - return - } - - matchFile := func(t *testing.T, storage *Storage, artifact sourcev1.Artifact, file *File, expectMismatch bool) { - c, err := os.ReadFile(storage.LocalPath(artifact)) - if err != nil { - t.Fatalf("failed reading file: %v", err) - } - if (string(c) != string(file.Content)) != expectMismatch { - t.Errorf("artifact content does not match and not expecting mismatch, got: %q, want: %q", string(c), string(file.Content)) - } - } - - tests := []struct { - name string - file *File - want *File - expectMismatch bool - }{ - { - name: "content match", - file: &File{ - Name: "manifest.yaml", - Content: []byte(`contents`), - }, - want: &File{ - Name: "manifest.yaml", - Content: []byte(`contents`), - }, - }, - { - name: "content not match", - file: &File{ - Name: "manifest.yaml", - Content: []byte(`contents`), - }, - want: &File{ - Name: "manifest.yaml", - Content: []byte(`mismatch contents`), - }, - expectMismatch: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - absPath, err := createFile(tt.file) - if err != nil { - t.Error(err) - return - } - 
artifact := sourcev1.Artifact{ - Path: filepath.Join(randStringRunes(10), randStringRunes(10), randStringRunes(10)), - } - if err := storage.MkdirAll(artifact); err != nil { - t.Fatalf("artifact directory creation failed: %v", err) - } - if err := storage.CopyFromPath(&artifact, absPath); err != nil { - t.Errorf("CopyFromPath() error = %v", err) - } - matchFile(t, storage, artifact, tt.want, tt.expectMismatch) - }) - } -} - -func TestStorage_getGarbageFiles(t *testing.T) { - artifactFolder := filepath.Join("foo", "bar") - tests := []struct { - name string - artifactPaths []string - createPause time.Duration - ttl time.Duration - maxItemsToBeRetained int - totalCountLimit int - wantDeleted []string - }{ - { - name: "delete files based on maxItemsToBeRetained", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - filepath.Join(artifactFolder, "artifact5.tar.gz"), - }, - createPause: time.Millisecond * 10, - ttl: time.Minute * 2, - totalCountLimit: 10, - maxItemsToBeRetained: 2, - wantDeleted: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - }, - }, - { - name: "delete files based on maxItemsToBeRetained, ignore lock files", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact1.tar.gz.lock"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz.lock"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz.lock"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - filepath.Join(artifactFolder, "artifact5.tar.gz"), - }, - createPause: time.Millisecond * 10, - ttl: time.Minute * 2, - 
totalCountLimit: 10, - maxItemsToBeRetained: 2, - wantDeleted: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - }, - }, - { - name: "delete files based on ttl", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - filepath.Join(artifactFolder, "artifact5.tar.gz"), - }, - createPause: time.Second * 1, - ttl: time.Second*3 + time.Millisecond*500, - totalCountLimit: 10, - maxItemsToBeRetained: 4, - wantDeleted: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - }, - }, - { - name: "delete files based on ttl, ignore lock files", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact1.tar.gz.lock"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz.lock"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - filepath.Join(artifactFolder, "artifact5.tar.gz"), - }, - createPause: time.Second * 1, - ttl: time.Second*3 + time.Millisecond*500, - totalCountLimit: 10, - maxItemsToBeRetained: 4, - wantDeleted: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - }, - }, - { - name: "delete files based on ttl and maxItemsToBeRetained", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - filepath.Join(artifactFolder, "artifact5.tar.gz"), - filepath.Join(artifactFolder, "artifact6.tar.gz"), 
- }, - createPause: time.Second * 1, - ttl: time.Second*5 + time.Millisecond*500, - totalCountLimit: 10, - maxItemsToBeRetained: 4, - wantDeleted: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - }, - }, - { - name: "delete files based on ttl and maxItemsToBeRetained and totalCountLimit", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - filepath.Join(artifactFolder, "artifact5.tar.gz"), - filepath.Join(artifactFolder, "artifact6.tar.gz"), - }, - createPause: time.Millisecond * 500, - ttl: time.Millisecond * 500, - totalCountLimit: 3, - maxItemsToBeRetained: 2, - wantDeleted: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", tt.ttl, tt.maxItemsToBeRetained) - g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") - - artifact := sourcev1.Artifact{ - Path: tt.artifactPaths[len(tt.artifactPaths)-1], - } - g.Expect(os.MkdirAll(filepath.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred()) - for _, artifactPath := range tt.artifactPaths { - f, err := os.Create(filepath.Join(dir, artifactPath)) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(f.Close()).ToNot(HaveOccurred()) - time.Sleep(tt.createPause) - } - - deletedPaths, err := s.getGarbageFiles(artifact, tt.totalCountLimit, tt.maxItemsToBeRetained, tt.ttl) - g.Expect(err).ToNot(HaveOccurred(), "failed to collect garbage files") - g.Expect(len(tt.wantDeleted)).To(Equal(len(deletedPaths))) - for _, wantDeletedPath := range tt.wantDeleted { - present := false - for _, 
deletedPath := range deletedPaths { - if strings.Contains(deletedPath, wantDeletedPath) { - present = true - break - } - } - if !present { - g.Fail(fmt.Sprintf("expected file to be deleted, still exists: %s", wantDeletedPath)) - } - } - }) - } -} - -func TestStorage_GarbageCollect(t *testing.T) { - artifactFolder := filepath.Join("foo", "bar") - tests := []struct { - name string - artifactPaths []string - wantCollected []string - wantDeleted []string - wantErr string - ctxTimeout time.Duration - }{ - { - name: "garbage collects", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact1.tar.gz.lock"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz.lock"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - }, - wantCollected: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - }, - wantDeleted: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact1.tar.gz.lock"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz.lock"), - }, - ctxTimeout: time.Second * 1, - }, - { - name: "garbage collection fails with context timeout", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - }, - wantErr: "context deadline exceeded", - ctxTimeout: time.Nanosecond * 1, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", time.Second*2, 2) - g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") - - artifact := sourcev1.Artifact{ - Path: 
tt.artifactPaths[len(tt.artifactPaths)-1], - } - g.Expect(os.MkdirAll(filepath.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred()) - for i, artifactPath := range tt.artifactPaths { - f, err := os.Create(filepath.Join(dir, artifactPath)) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(f.Close()).ToNot(HaveOccurred()) - if i != len(tt.artifactPaths)-1 { - time.Sleep(time.Second * 1) - } - } - - collectedPaths, err := s.GarbageCollect(context.TODO(), artifact, tt.ctxTimeout) - if tt.wantErr == "" { - g.Expect(err).ToNot(HaveOccurred(), "failed to collect garbage files") - } else { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) - } - if len(tt.wantCollected) > 0 { - g.Expect(len(tt.wantCollected)).To(Equal(len(collectedPaths))) - for _, wantCollectedPath := range tt.wantCollected { - present := false - for _, collectedPath := range collectedPaths { - if strings.Contains(collectedPath, wantCollectedPath) { - g.Expect(collectedPath).ToNot(BeAnExistingFile()) - present = true - break - } - } - if present == false { - g.Fail(fmt.Sprintf("expected file to be garbage collected, still exists: %s", wantCollectedPath)) - } - } - } - for _, delFile := range tt.wantDeleted { - g.Expect(filepath.Join(dir, delFile)).ToNot(BeAnExistingFile()) - } - }) - } -} - -func TestStorage_VerifyArtifact(t *testing.T) { - g := NewWithT(t) - - dir := t.TempDir() - s, err := NewStorage(dir, "", 0, 0) - g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") - - g.Expect(os.WriteFile(filepath.Join(dir, "artifact"), []byte("test"), 0o600)).To(Succeed()) - - t.Run("artifact without digest", func(t *testing.T) { - g := NewWithT(t) - - err := s.VerifyArtifact(sourcev1.Artifact{}) - g.Expect(err).To(HaveOccurred()) - g.Expect(err).To(MatchError("artifact has no digest")) - }) - - t.Run("artifact with invalid digest", func(t *testing.T) { - g := NewWithT(t) - - err := s.VerifyArtifact(sourcev1.Artifact{Digest: "invalid"}) - 
g.Expect(err).To(HaveOccurred()) - g.Expect(err).To(MatchError("failed to parse artifact digest 'invalid': invalid checksum digest format")) - }) - - t.Run("artifact with invalid path", func(t *testing.T) { - g := NewWithT(t) - - err := s.VerifyArtifact(sourcev1.Artifact{ - Digest: "sha256:9ba7a35ce8acd3557fe30680ef193ca7a36bb5dc62788f30de7122a0a5beab69", - Path: "invalid", - }) - g.Expect(err).To(HaveOccurred()) - g.Expect(errors.Is(err, os.ErrNotExist)).To(BeTrue()) - }) - - t.Run("artifact with digest mismatch", func(t *testing.T) { - g := NewWithT(t) - - err := s.VerifyArtifact(sourcev1.Artifact{ - Digest: "sha256:9ba7a35ce8acd3557fe30680ef193ca7a36bb5dc62788f30de7122a0a5beab69", - Path: "artifact", - }) - g.Expect(err).To(HaveOccurred()) - g.Expect(err).To(MatchError("computed digest doesn't match 'sha256:9ba7a35ce8acd3557fe30680ef193ca7a36bb5dc62788f30de7122a0a5beab69'")) - }) - - t.Run("artifact with digest match", func(t *testing.T) { - g := NewWithT(t) - - err := s.VerifyArtifact(sourcev1.Artifact{ - Digest: "sha256:9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", - Path: "artifact", - }) - g.Expect(err).ToNot(HaveOccurred()) - }) -} diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 64bcec8a8..ad0365616 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -32,6 +32,10 @@ import ( "testing" "time" + "github.com/distribution/distribution/v3/configuration" + dockerRegistry "github.com/distribution/distribution/v3/registry" + _ "github.com/distribution/distribution/v3/registry/auth/htpasswd" + _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" "github.com/foxcpp/go-mockdns" "github.com/phayes/freeport" "github.com/sirupsen/logrus" @@ -43,19 +47,17 @@ import ( "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" 
"sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" - "github.com/distribution/distribution/v3/configuration" - dockerRegistry "github.com/distribution/distribution/v3/registry" - _ "github.com/distribution/distribution/v3/registry/auth/htpasswd" - _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" - + "github.com/fluxcd/pkg/artifact/config" + "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/metrics" "github.com/fluxcd/pkg/runtime/testenv" "github.com/fluxcd/pkg/testserver" sourcev1 "github.com/fluxcd/source-controller/api/v1" - sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/internal/cache" // +kubebuilder:scaffold:imports ) @@ -82,7 +84,7 @@ const ( var ( k8sClient client.Client testEnv *testenv.Environment - testStorage *Storage + testStorage *storage.Storage testServer *testserver.ArtifactServer testMetricsH controller.Metrics ctx = ctrl.SetupSignalHandler() @@ -156,19 +158,23 @@ func setupRegistryServer(ctx context.Context, workspaceDir string, opts registry // mock DNS to map example.com to 127.0.0.1. // This is required because Docker enforces HTTP if the registry // is hosted on localhost/127.0.0.1. - server.registryHost = fmt.Sprintf("example.com:%d", port) - // Disable DNS server logging as it is extremely chatty. 
- dnsLog := log.Default() - dnsLog.SetOutput(io.Discard) - server.dnsServer, err = mockdns.NewServerWithLogger(map[string]mockdns.Zone{ - "example.com.": { - A: []string{"127.0.0.1"}, - }, - }, dnsLog, false) - if err != nil { - return nil, err + if opts.withTLS { + server.registryHost = fmt.Sprintf("example.com:%d", port) + // Disable DNS server logging as it is extremely chatty. + dnsLog := log.Default() + dnsLog.SetOutput(io.Discard) + server.dnsServer, err = mockdns.NewServerWithLogger(map[string]mockdns.Zone{ + "example.com.": { + A: []string{"127.0.0.1"}, + }, + }, dnsLog, false) + if err != nil { + return nil, err + } + server.dnsServer.PatchNet(net.DefaultResolver) + } else { + server.registryHost = fmt.Sprintf("127.0.0.1:%d", port) } - server.dnsServer.PatchNet(net.DefaultResolver) config.HTTP.Addr = fmt.Sprintf(":%d", port) config.HTTP.DrainTimeout = time.Duration(10) * time.Second @@ -269,7 +275,6 @@ func TestMain(m *testing.M) { initTestTLS() utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme)) - utilruntime.Must(sourcev1beta2.AddToScheme(scheme.Scheme)) testEnv = testenv.New( testenv.WithCRDPath(filepath.Join("..", "..", "config", "crd", "bases")), @@ -427,12 +432,20 @@ func initTestTLS() { } } -func newTestStorage(s *testserver.HTTPServer) (*Storage, error) { - storage, err := NewStorage(s.Root(), s.URL(), retentionTTL, retentionRecords) +func newTestStorage(s *testserver.HTTPServer) (*storage.Storage, error) { + opts := &config.Options{ + StoragePath: s.Root(), + StorageAddress: s.URL(), + StorageAdvAddress: s.URL(), + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) if err != nil { return nil, err } - return storage, nil + return st, nil } var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") @@ -448,3 +461,8 @@ func randStringRunes(n int) string { func int64p(i int64) *int64 { return &i } + +func logOCIRepoStatus(t 
*testing.T, obj *sourcev1.OCIRepository) { + sts, _ := yaml.Marshal(obj.Status) + t.Log(string(sts)) +} diff --git a/internal/controller/testdata/certs/ca-key.pem b/internal/controller/testdata/certs/ca-key.pem index b69de5ab5..5f78af275 100644 --- a/internal/controller/testdata/certs/ca-key.pem +++ b/internal/controller/testdata/certs/ca-key.pem @@ -1,5 +1,5 @@ -----BEGIN EC PRIVATE KEY----- -MHcCAQEEIOH/u9dMcpVcZ0+X9Fc78dCTj8SHuXawhLjhu/ej64WToAoGCCqGSM49 -AwEHoUQDQgAEruH/kPxtX3cyYR2G7TYmxLq6AHyzo/NGXc9XjGzdJutE2SQzn37H -dvSJbH+Lvqo9ik0uiJVRVdCYD1j7gNszGA== +MHcCAQEEICJFvVFVBSL0EteniBRfI9M1tm9Vmh9CKv7dhvZSqtV6oAoGCCqGSM49 +AwEHoUQDQgAE+EGQ9wZw/XIbyCwu7wvbzoGhpE2KtZwSUXboPEAgacfaqfgdT92D +If9qYie8umbgUymQnnqN8fRnT/wqqdBLDg== -----END EC PRIVATE KEY----- diff --git a/internal/controller/testdata/certs/ca.csr b/internal/controller/testdata/certs/ca.csr index baa8aeb26..ed5490ce2 100644 --- a/internal/controller/testdata/certs/ca.csr +++ b/internal/controller/testdata/certs/ca.csr @@ -1,9 +1,9 @@ -----BEGIN CERTIFICATE REQUEST----- -MIIBIDCBxgIBADAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 -AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr -RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxigSzBJBgkqhkiG9w0BCQ4x +MIIBHzCBxgIBADAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 +AgEGCCqGSM49AwEHA0IABPhBkPcGcP1yG8gsLu8L286BoaRNirWcElF26DxAIGnH +2qn4HU/dgyH/amInvLpm4FMpkJ56jfH0Z0/8KqnQSw6gSzBJBgkqhkiG9w0BCQ4x PDA6MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFt -cGxlLmNvbYcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAkw85nyLhJssyCYsaFvRU -EErhu66xHPJug/nG50uV5OoCIQCUorrflOSxfChPeCe4xfwcPv7FpcCYbKVYtGzz -b34Wow== +cGxlLmNvbYcEfwAAATAKBggqhkjOPQQDAgNIADBFAiEA1PxOWSIrmLb5IeejHvfx +AkjpamR/GTLhSzXlGv1hCmsCIDSeZL2OF5R5k2v4giXiB6GUfmawykGkO2fIG1kq +5l5V -----END CERTIFICATE REQUEST----- diff --git a/internal/controller/testdata/certs/ca.pem b/internal/controller/testdata/certs/ca.pem index 080bd24e6..72644519d 100644 --- 
a/internal/controller/testdata/certs/ca.pem +++ b/internal/controller/testdata/certs/ca.pem @@ -1,11 +1,11 @@ -----BEGIN CERTIFICATE----- -MIIBhzCCAS2gAwIBAgIUdsAtiX3gN0uk7ddxASWYE/tdv0wwCgYIKoZIzj0EAwIw -GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjAwNDE3MDgxODAwWhcNMjUw -NDE2MDgxODAwWjAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 -AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr -RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxijUzBRMA4GA1UdDwEB/wQE -AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQGyUiU1QEZiMAqjsnIYTwZ -4yp5wzAPBgNVHREECDAGhwR/AAABMAoGCCqGSM49BAMCA0gAMEUCIQDzdtvKdE8O -1+WRTZ9MuSiFYcrEz7Zne7VXouDEKqKEigIgM4WlbDeuNCKbqhqj+xZV0pa3rweb -OD8EjjCMY69RMO0= +MIIBiDCCAS2gAwIBAgIUCRPU/Fa1nIWlk7TUejHGI+WKJFAwCgYIKoZIzj0EAwIw +GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjUwNDIyMDcwNTAwWhcNMzAw +NDIxMDcwNTAwWjAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 +AgEGCCqGSM49AwEHA0IABPhBkPcGcP1yG8gsLu8L286BoaRNirWcElF26DxAIGnH +2qn4HU/dgyH/amInvLpm4FMpkJ56jfH0Z0/8KqnQSw6jUzBRMA4GA1UdDwEB/wQE +AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS+cS2gBCfSCltLUMNY0kG2 +mj9zEDAPBgNVHREECDAGhwR/AAABMAoGCCqGSM49BAMCA0kAMEYCIQC33kO/m+ab +i/2dlkg7hab4jCkFkxV3fWiOP0lbrLIMYQIhAPOcHeXmGE32apXKoZ6IfGJdMtz1 +3bkHYeqNs2qtpQ/5 -----END CERTIFICATE----- diff --git a/internal/controller/testdata/certs/client-key.pem b/internal/controller/testdata/certs/client-key.pem index b39c483d0..f55b40b4d 100644 --- a/internal/controller/testdata/certs/client-key.pem +++ b/internal/controller/testdata/certs/client-key.pem @@ -1,5 +1,5 @@ -----BEGIN EC PRIVATE KEY----- -MHcCAQEEICpqb1p1TH98yoFXEEt6JmWc/Snb8NaYyz8jfTOVDBLOoAoGCCqGSM49 -AwEHoUQDQgAERjzob4CCuyv+cYPyTYCPHwGuqSNGNuX3UGWpxvzwEqjYEWiePlOz -eJLk4DWaVX8CmVakNLsK/EHnBv9ErG7QYQ== +MHcCAQEEIFVLYwGEhqLW/WYnsA9om6cSxcgVsKnwIWXc34DF7LpwoAoGCCqGSM49 +AwEHoUQDQgAE5H76We32W5cQq8DRJT+pteyh53GUBiI5IbM+qVWgsCIFJEaSJKgs +mv1H7c3NhP292Pgr6vdWJACLQHzmpsVpmg== -----END EC PRIVATE KEY----- diff --git a/internal/controller/testdata/certs/client.csr 
b/internal/controller/testdata/certs/client.csr index 41f498804..3699ea27b 100644 --- a/internal/controller/testdata/certs/client.csr +++ b/internal/controller/testdata/certs/client.csr @@ -1,8 +1,8 @@ -----BEGIN CERTIFICATE REQUEST----- -MIIBHDCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG -CCqGSM49AwEHA0IABEY86G+Agrsr/nGD8k2Ajx8BrqkjRjbl91Blqcb88BKo2BFo -nj5Ts3iS5OA1mlV/AplWpDS7CvxB5wb/RKxu0GGgSzBJBgkqhkiG9w0BCQ4xPDA6 +MIIBGzCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABOR++lnt9luXEKvA0SU/qbXsoedxlAYiOSGzPqlVoLAiBSRG +kiSoLJr9R+3NzYT9vdj4K+r3ViQAi0B85qbFaZqgSzBJBgkqhkiG9w0BCQ4xPDA6 MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxl -LmNvbYcEfwAAATAKBggqhkjOPQQDAgNIADBFAiAHmtr9fDDx5eyFfY7r5m8xA4Wh -Jm+TB6/czvXRNNOKzAIhAN7ln6BpneEm2oqIBGqvfc3pETC6jdGJxCfYw+X+7von +LmNvbYcEfwAAATAKBggqhkjOPQQDAgNHADBEAiB0px2gw2ICFz26zAajtJyoNHl+ +inOXY5ohtzP4ag+NXQIgAbjIsOUuQ7JT31DdI6yCVfO014hHawtEsdV4rxTrQMA= -----END CERTIFICATE REQUEST----- diff --git a/internal/controller/testdata/certs/client.pem b/internal/controller/testdata/certs/client.pem index 4a85663ea..9db876e59 100644 --- a/internal/controller/testdata/certs/client.pem +++ b/internal/controller/testdata/certs/client.pem @@ -1,13 +1,13 @@ -----BEGIN CERTIFICATE----- -MIIB7DCCAZKgAwIBAgIUPJmKtZ6CfSxybx2BSsVS5EVun0swCgYIKoZIzj0EAwIw -GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjMwNzE5MTExMzAwWhcNMzMw -NzE2MTExMzAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG -CCqGSM49AwEHA0IABEY86G+Agrsr/nGD8k2Ajx8BrqkjRjbl91Blqcb88BKo2BFo -nj5Ts3iS5OA1mlV/AplWpDS7CvxB5wb/RKxu0GGjgbowgbcwDgYDVR0PAQH/BAQD +MIIB7DCCAZKgAwIBAgIUPH5zyEsXoFMCMkZaM2s6YtnoQcgwCgYIKoZIzj0EAwIw +GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjUwNDIyMDcwNTAwWhcNMzUw +NDIwMDcwNTAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABOR++lnt9luXEKvA0SU/qbXsoedxlAYiOSGzPqlVoLAiBSRG +kiSoLJr9R+3NzYT9vdj4K+r3ViQAi0B85qbFaZqjgbowgbcwDgYDVR0PAQH/BAQD 
AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA -MB0GA1UdDgQWBBTgAyCQoH/EJqz/nY5DJa/uvWWshzAfBgNVHSMEGDAWgBQGyUiU -1QEZiMAqjsnIYTwZ4yp5wzA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu -Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDSAAwRQIgKSJH -YvhKiXcUUzRoL6FsXQeAlhemSg3lD9se+BhRF8ECIQDx2UpWFLDe5NOPqhrcR1Sd -haFriAG8eR1yD3u3nJvY6g== +MB0GA1UdDgQWBBTqud4vpysQdb1/5K3RoDXvBdQGgzAfBgNVHSMEGDAWgBS+cS2g +BCfSCltLUMNY0kG2mj9zEDA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu +Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDSAAwRQIhAM0u +Eo6u3BDtw5bezhLa/THDy4QT63ktpAff9i/QJOErAiAifOvW7n5ZTLjjSnJ+dCtr +Avtupcg1WLyryhliqtNKhg== -----END CERTIFICATE----- diff --git a/internal/controller/testdata/certs/server-key.pem b/internal/controller/testdata/certs/server-key.pem index 5054ff39f..64d7da136 100644 --- a/internal/controller/testdata/certs/server-key.pem +++ b/internal/controller/testdata/certs/server-key.pem @@ -1,5 +1,5 @@ -----BEGIN EC PRIVATE KEY----- -MHcCAQEEIKQbEXV6nljOHMmPrWVWQ+JrAE5wsbE9iMhfY7wlJgXOoAoGCCqGSM49 -AwEHoUQDQgAE+53oBGlrvVUTelSGYji8GNHVhVg8jOs1PeeLuXCIZjQmctHLFEq3 -fE+mGxCL93MtpYzlwIWBf0m7pEGQre6bzg== +MHcCAQEEIH19RQir/x9wHNAvHITu7/3Y4ckQ3GsNyEGYF3/nalheoAoGCCqGSM49 +AwEHoUQDQgAEvqlooNIpRmCjv9yBzjqoyXZvcU8zo9npYm3HPX7TReYetrkkJh/P +6a5NDJhnWemcj9iZdm2kGTE7MCgGi4mRog== -----END EC PRIVATE KEY----- diff --git a/internal/controller/testdata/certs/server.csr b/internal/controller/testdata/certs/server.csr index 5caf7b39c..b0fce1781 100644 --- a/internal/controller/testdata/certs/server.csr +++ b/internal/controller/testdata/certs/server.csr @@ -1,8 +1,8 @@ -----BEGIN CERTIFICATE REQUEST----- -MIIBHDCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG -CCqGSM49AwEHA0IABPud6ARpa71VE3pUhmI4vBjR1YVYPIzrNT3ni7lwiGY0JnLR -yxRKt3xPphsQi/dzLaWM5cCFgX9Ju6RBkK3um86gSzBJBgkqhkiG9w0BCQ4xPDA6 +MIIBGzCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABL6paKDSKUZgo7/cgc46qMl2b3FPM6PZ6WJtxz1+00XmHra5 
+JCYfz+muTQyYZ1npnI/YmXZtpBkxOzAoBouJkaKgSzBJBgkqhkiG9w0BCQ4xPDA6 MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxl -LmNvbYcEfwAAATAKBggqhkjOPQQDAgNIADBFAiB5A6wvQ5x6g/zhiyn+wLzXsOaB -Gb/F25p/zTHHQqZbkwIhAPUgWzy/2bs6eZEi97bSlaRdmrqHwqT842t5sEwGyXNV +LmNvbYcEfwAAATAKBggqhkjOPQQDAgNHADBEAiAJbvDLjrCkTRvTjrv2wXLN9Hgu +p6SrTQJUWlIj3S8DggIgJraxPvnwfeKE5dM7ZgJXADHy838h04dQ+Za7hS899V8= -----END CERTIFICATE REQUEST----- diff --git a/internal/controller/testdata/certs/server.pem b/internal/controller/testdata/certs/server.pem index 11c655a0b..f3345e3b2 100644 --- a/internal/controller/testdata/certs/server.pem +++ b/internal/controller/testdata/certs/server.pem @@ -1,13 +1,13 @@ -----BEGIN CERTIFICATE----- -MIIB7TCCAZKgAwIBAgIUB+17B8PU05wVTzRHLeG+S+ybZK4wCgYIKoZIzj0EAwIw -GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjAwNDE3MDgxODAwWhcNMzAw -NDE1MDgxODAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG -CCqGSM49AwEHA0IABPud6ARpa71VE3pUhmI4vBjR1YVYPIzrNT3ni7lwiGY0JnLR -yxRKt3xPphsQi/dzLaWM5cCFgX9Ju6RBkK3um86jgbowgbcwDgYDVR0PAQH/BAQD +MIIB6zCCAZKgAwIBAgIUSGuttQSdoyWQzeZ6GkiKORYYUvQwCgYIKoZIzj0EAwIw +GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjUwNDIyMDcwNTAwWhcNMzUw +NDIwMDcwNTAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABL6paKDSKUZgo7/cgc46qMl2b3FPM6PZ6WJtxz1+00XmHra5 +JCYfz+muTQyYZ1npnI/YmXZtpBkxOzAoBouJkaKjgbowgbcwDgYDVR0PAQH/BAQD AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA -MB0GA1UdDgQWBBTM8HS5EIlVMBYv/300jN8PEArUgDAfBgNVHSMEGDAWgBQGyUiU -1QEZiMAqjsnIYTwZ4yp5wzA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu -Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhAOgB -5W82FEgiTTOmsNRekkK5jUPbj4D4eHtb2/BI7ph4AiEA2AxHASIFBdv5b7Qf5prb -bdNmUCzAvVuCAKuMjg2OPrE= +MB0GA1UdDgQWBBSNrNAk9jWUcFjxjAKzuDwsBrG1NDAfBgNVHSMEGDAWgBS+cS2g +BCfSCltLUMNY0kG2mj9zEDA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu +Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDRwAwRAIgIcrb +xGgcRsmP/R6Qo+Xe/w1UvNDaWJfsWO+hq1DtOQgCIEyGi3ClowsGnNpo734ArWbG 
+taem7qVKZJmCWRM6DFuT -----END CERTIFICATE----- diff --git a/internal/controller/testdata/podinfo/podinfo-6.1.5-beta.1.tar b/internal/controller/testdata/podinfo/podinfo-6.1.5-beta.1.tar new file mode 100644 index 000000000..335d6a5ad Binary files /dev/null and b/internal/controller/testdata/podinfo/podinfo-6.1.5-beta.1.tar differ diff --git a/internal/controller/testdata/podinfo/podinfo-6.1.5-rc.1.tar b/internal/controller/testdata/podinfo/podinfo-6.1.5-rc.1.tar new file mode 100644 index 000000000..335d6a5ad Binary files /dev/null and b/internal/controller/testdata/podinfo/podinfo-6.1.5-rc.1.tar differ diff --git a/internal/controller/testdata/podinfo/podinfo-6.1.6-rc.1.tar b/internal/controller/testdata/podinfo/podinfo-6.1.6-rc.1.tar new file mode 100644 index 000000000..09616c2df Binary files /dev/null and b/internal/controller/testdata/podinfo/podinfo-6.1.6-rc.1.tar differ diff --git a/internal/digest/digest.go b/internal/digest/digest.go deleted file mode 100644 index 6b1117398..000000000 --- a/internal/digest/digest.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package digest - -import ( - "crypto" - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "fmt" - - "github.com/opencontainers/go-digest" - _ "github.com/opencontainers/go-digest/blake3" -) - -const ( - SHA1 digest.Algorithm = "sha1" -) - -var ( - // Canonical is the primary digest algorithm used to calculate checksums. - Canonical = digest.SHA256 -) - -func init() { - // Register SHA-1 algorithm for support of e.g. Git commit SHAs. - digest.RegisterAlgorithm(SHA1, crypto.SHA1) -} - -// AlgorithmForName returns the digest algorithm for the given name, or an -// error of type digest.ErrDigestUnsupported if the algorithm is unavailable. -func AlgorithmForName(name string) (digest.Algorithm, error) { - a := digest.Algorithm(name) - if !a.Available() { - return "", fmt.Errorf("%w: %s", digest.ErrDigestUnsupported, name) - } - return a, nil -} diff --git a/internal/digest/digest_test.go b/internal/digest/digest_test.go deleted file mode 100644 index 3030c2d11..000000000 --- a/internal/digest/digest_test.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package digest - -import ( - "errors" - "testing" - - . 
"github.com/onsi/gomega" - "github.com/opencontainers/go-digest" -) - -func TestAlgorithmForName(t *testing.T) { - tests := []struct { - name string - want digest.Algorithm - wantErr error - }{ - { - name: "sha256", - want: digest.SHA256, - }, - { - name: "sha384", - want: digest.SHA384, - }, - { - name: "sha512", - want: digest.SHA512, - }, - { - name: "blake3", - want: digest.BLAKE3, - }, - { - name: "sha1", - want: SHA1, - }, - { - name: "not-available", - wantErr: digest.ErrDigestUnsupported, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - got, err := AlgorithmForName(tt.name) - if tt.wantErr != nil { - g.Expect(err).To(HaveOccurred()) - g.Expect(errors.Is(err, tt.wantErr)).To(BeTrue()) - return - } - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).To(Equal(tt.want)) - }) - } -} diff --git a/internal/digest/writer.go b/internal/digest/writer.go deleted file mode 100644 index 4783f8b84..000000000 --- a/internal/digest/writer.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package digest - -import ( - "fmt" - "io" - - "github.com/opencontainers/go-digest" -) - -// MultiDigester is a digester that writes to multiple digesters to calculate -// the checksum of different algorithms. 
-type MultiDigester struct { - d map[digest.Algorithm]digest.Digester -} - -// NewMultiDigester returns a new MultiDigester that writes to newly -// initialized digesters for the given algorithms. If a provided algorithm is -// not available, it returns a digest.ErrDigestUnsupported error. -func NewMultiDigester(algos ...digest.Algorithm) (*MultiDigester, error) { - d := make(map[digest.Algorithm]digest.Digester, len(algos)) - for _, a := range algos { - if _, ok := d[a]; ok { - continue - } - if !a.Available() { - return nil, fmt.Errorf("%w: %s", digest.ErrDigestUnsupported, a) - } - d[a] = a.Digester() - } - return &MultiDigester{d: d}, nil -} - -// Write writes p to all underlying digesters. -func (w *MultiDigester) Write(p []byte) (n int, err error) { - for _, d := range w.d { - n, err = d.Hash().Write(p) - if err != nil { - return - } - if n != len(p) { - err = io.ErrShortWrite - return - } - } - return len(p), nil -} - -// Digest returns the digest of the data written to the digester of the given -// algorithm, or an empty digest if the algorithm is not available. -func (w *MultiDigester) Digest(algo digest.Algorithm) digest.Digest { - if d, ok := w.d[algo]; ok { - return d.Digest() - } - return "" -} diff --git a/internal/digest/writer_test.go b/internal/digest/writer_test.go deleted file mode 100644 index 9ae63b882..000000000 --- a/internal/digest/writer_test.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package digest - -import ( - "crypto/rand" - "testing" - - . "github.com/onsi/gomega" - "github.com/opencontainers/go-digest" -) - -func TestNewMultiDigester(t *testing.T) { - t.Run("constructs a MultiDigester", func(t *testing.T) { - g := NewWithT(t) - - d, err := NewMultiDigester(Canonical, digest.SHA512) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(d.d).To(HaveLen(2)) - }) - - t.Run("returns an error if an algorithm is not available", func(t *testing.T) { - g := NewWithT(t) - - _, err := NewMultiDigester(digest.Algorithm("not-available")) - g.Expect(err).To(HaveOccurred()) - }) -} - -func TestMultiDigester_Write(t *testing.T) { - t.Run("writes to all digesters", func(t *testing.T) { - g := NewWithT(t) - - d, err := NewMultiDigester(Canonical, digest.SHA512) - g.Expect(err).ToNot(HaveOccurred()) - - n, err := d.Write([]byte("hello")) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(n).To(Equal(5)) - - n, err = d.Write([]byte(" world")) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(n).To(Equal(6)) - - g.Expect(d.Digest(Canonical)).To(BeEquivalentTo("sha256:b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9")) - g.Expect(d.Digest(digest.SHA512)).To(BeEquivalentTo("sha512:309ecc489c12d6eb4cc40f50c902f2b4d0ed77ee511a7c7a9bcd3ca86d4cd86f989dd35bc5ff499670da34255b45b0cfd830e81f605dcf7dc5542e93ae9cd76f")) - }) -} - -func TestMultiDigester_Digest(t *testing.T) { - t.Run("returns the digest for the given algorithm", func(t *testing.T) { - g := NewWithT(t) - - d, err := NewMultiDigester(Canonical, digest.SHA512) - g.Expect(err).ToNot(HaveOccurred()) - - g.Expect(d.Digest(Canonical)).To(BeEquivalentTo("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")) - g.Expect(d.Digest(digest.SHA512)).To(BeEquivalentTo("sha512:cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e")) - }) - - t.Run("returns an empty digest if the 
algorithm is not supported", func(t *testing.T) { - g := NewWithT(t) - - d, err := NewMultiDigester(Canonical, digest.SHA512) - g.Expect(err).ToNot(HaveOccurred()) - - g.Expect(d.Digest(digest.Algorithm("not-available"))).To(BeEmpty()) - }) -} - -func benchmarkMultiDigesterWrite(b *testing.B, algos []digest.Algorithm, pSize int64) { - md, err := NewMultiDigester(algos...) - if err != nil { - b.Fatal(err) - } - - p := make([]byte, pSize) - if _, err = rand.Read(p); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - md.Write(p) - } -} - -func BenchmarkMultiDigester_Write(b *testing.B) { - const pSize = 1024 * 2 - - b.Run("sha1", func(b *testing.B) { - benchmarkMultiDigesterWrite(b, []digest.Algorithm{SHA1}, pSize) - }) - - b.Run("sha256", func(b *testing.B) { - benchmarkMultiDigesterWrite(b, []digest.Algorithm{digest.SHA256}, pSize) - }) - - b.Run("blake3", func(b *testing.B) { - benchmarkMultiDigesterWrite(b, []digest.Algorithm{digest.BLAKE3}, pSize) - }) - - b.Run("sha256+sha384", func(b *testing.B) { - benchmarkMultiDigesterWrite(b, []digest.Algorithm{digest.SHA256, digest.SHA384}, pSize) - }) - - b.Run("sha256+sha512", func(b *testing.B) { - benchmarkMultiDigesterWrite(b, []digest.Algorithm{digest.SHA256, digest.SHA512}, pSize) - }) - - b.Run("sha256+blake3", func(b *testing.B) { - benchmarkMultiDigesterWrite(b, []digest.Algorithm{digest.SHA256, digest.BLAKE3}, pSize) - }) -} diff --git a/internal/error/sanitized.go b/internal/error/sanitized.go new file mode 100644 index 000000000..04f6ccf92 --- /dev/null +++ b/internal/error/sanitized.go @@ -0,0 +1,76 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package error + +import ( + "fmt" + "net/url" + "regexp" +) + +type SanitizedError struct { + err string +} + +func (e SanitizedError) Error() string { + return e.err +} + +// SanitizeError extracts all URLs from the error message +// and replaces them with the URL without the query string. +func SanitizeError(err error) SanitizedError { + errorMessage := err.Error() + for _, u := range extractURLs(errorMessage) { + urlWithoutQueryString, err := removeQueryString(u) + if err == nil { + re, err := regexp.Compile(fmt.Sprintf("%s*", regexp.QuoteMeta(u))) + if err == nil { + errorMessage = re.ReplaceAllString(errorMessage, urlWithoutQueryString) + } + } + } + + return SanitizedError{errorMessage} +} + +// removeQueryString takes a URL string as input and returns the URL without the query string. +func removeQueryString(urlStr string) (string, error) { + // Parse the URL. + u, err := url.Parse(urlStr) + if err != nil { + return "", err + } + + // Rebuild the URL without the query string. + u.RawQuery = "" + return u.String(), nil +} + +// extractURLs takes a log message as input and returns the URLs found. +func extractURLs(logMessage string) []string { + // Define a regular expression to match a URL. + // This is a simple pattern and might need to be adjusted depending on the log message format. + urlRegex := regexp.MustCompile(`https?://[^\s]+`) + + // Find the first match in the log message. 
+ matches := urlRegex.FindAllString(logMessage, -1) + if len(matches) == 0 { + return []string{} + } + + return matches +} diff --git a/internal/error/sanitized_test.go b/internal/error/sanitized_test.go new file mode 100644 index 000000000..e9c6a858b --- /dev/null +++ b/internal/error/sanitized_test.go @@ -0,0 +1,141 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package error + +import ( + "errors" + "testing" + + . "github.com/onsi/gomega" +) + +func Test_extractURLs(t *testing.T) { + + tests := []struct { + name string + logMessage string + wantUrls []string + }{ + { + name: "Log Contains single URL", + logMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02\": dial tcp 20.60.53.129:443: connect: connection refused", + wantUrls: []string{"https://blobstorage.blob.core.windows.net/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02\":"}, + }, + { + name: "Log Contains multiple URL", + logMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml?abc=es https://blobstorage1.blob.core.windows.net/container/index.yaml?abc=no : dial tcp 20.60.53.129:443: connect: connection refused", + wantUrls: []string{ + "https://blobstorage.blob.core.windows.net/container/index.yaml?abc=es", + 
"https://blobstorage1.blob.core.windows.net/container/index.yaml?abc=no", + }, + }, + { + name: "Log Contains No URL", + logMessage: "Log message without URL", + wantUrls: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + urls := extractURLs(tt.logMessage) + + g.Expect(len(urls)).To(Equal(len(tt.wantUrls))) + for i := range tt.wantUrls { + g.Expect(urls[i]).To(Equal(tt.wantUrls[i])) + } + }) + } +} + +func Test_removeQueryString(t *testing.T) { + + tests := []struct { + name string + urlStr string + wantUrl string + }{ + { + name: "URL with query string", + urlStr: "https://blobstorage.blob.core.windows.net/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02", + wantUrl: "https://blobstorage.blob.core.windows.net/container/index.yaml", + }, + { + name: "URL without query string", + urlStr: "https://blobstorage.blob.core.windows.net/container/index.yaml", + wantUrl: "https://blobstorage.blob.core.windows.net/container/index.yaml", + }, + { + name: "URL with query string and port", + urlStr: "https://blobstorage.blob.core.windows.net:443/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02", + wantUrl: "https://blobstorage.blob.core.windows.net:443/container/index.yaml", + }, + { + name: "Invalid URL", + urlStr: "NoUrl", + wantUrl: "NoUrl", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + urlWithoutQueryString, err := removeQueryString(tt.urlStr) + + g.Expect(err).To(BeNil()) + g.Expect(urlWithoutQueryString).To(Equal(tt.wantUrl)) + }) + } +} + +func Test_SanitizeError(t *testing.T) { + + tests := []struct { + name string + errMessage string + wantErrMessage string + }{ + { + name: "Log message with URL with query string", + errMessage: "Get 
\"https://blobstorage.blob.core.windows.net/container/index.yaml?se=2024-05-01T16%3A28%3A26Z&sig=Signature&sp=rl&sr=c&st=2024-02-01T16%3A28%3A26Z&sv=2022-11-02\": dial tcp 20.60.53.129:443: connect: connection refused", + wantErrMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml dial tcp 20.60.53.129:443: connect: connection refused", + }, + { + name: "Log message without URL", + errMessage: "Log message contains no URL", + wantErrMessage: "Log message contains no URL", + }, + + { + name: "Log message with multiple Urls", + errMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml?abc=es https://blobstorage1.blob.core.windows.net/container/index.yaml?abc=no dial tcp 20.60.53.129:443: connect: connection refused", + wantErrMessage: "Get \"https://blobstorage.blob.core.windows.net/container/index.yaml https://blobstorage1.blob.core.windows.net/container/index.yaml dial tcp 20.60.53.129:443: connect: connection refused", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + err := SanitizeError(errors.New(tt.errMessage)) + g.Expect(err.Error()).To(Equal(tt.wantErrMessage)) + }) + } +} diff --git a/internal/features/features.go b/internal/features/features.go index c2622ce32..edb9beb17 100644 --- a/internal/features/features.go +++ b/internal/features/features.go @@ -19,7 +19,10 @@ limitations under the License. // states. package features -import feathelper "github.com/fluxcd/pkg/runtime/features" +import ( + "github.com/fluxcd/pkg/auth" + feathelper "github.com/fluxcd/pkg/runtime/features" +) const ( // CacheSecretsAndConfigMaps controls whether secrets and configmaps should be cached. @@ -35,6 +38,10 @@ var features = map[string]bool{ CacheSecretsAndConfigMaps: false, } +func init() { + auth.SetFeatureGates(features) +} + // FeatureGates contains a list of all supported feature gates and // their default values. 
func FeatureGates() map[string]bool { diff --git a/internal/fs/LICENSE b/internal/fs/LICENSE deleted file mode 100644 index a2dd15faf..000000000 --- a/internal/fs/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/internal/fs/fs.go b/internal/fs/fs.go deleted file mode 100644 index 21cf96e69..000000000 --- a/internal/fs/fs.go +++ /dev/null @@ -1,345 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fs - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "syscall" -) - -// RenameWithFallback attempts to rename a file or directory, but falls back to -// copying in the event of a cross-device link error. If the fallback copy -// succeeds, src is still removed, emulating normal rename behavior. -func RenameWithFallback(src, dst string) error { - _, err := os.Stat(src) - if err != nil { - return fmt.Errorf("cannot stat %s: %w", src, err) - } - - err = os.Rename(src, dst) - if err == nil { - return nil - } - - return renameFallback(err, src, dst) -} - -// renameByCopy attempts to rename a file or directory by copying it to the -// destination and then removing the src thus emulating the rename behavior. -func renameByCopy(src, dst string) error { - var cerr error - if dir, _ := IsDir(src); dir { - cerr = CopyDir(src, dst) - if cerr != nil { - cerr = fmt.Errorf("copying directory failed: %w", cerr) - } - } else { - cerr = copyFile(src, dst) - if cerr != nil { - cerr = fmt.Errorf("copying file failed: %w", cerr) - } - } - - if cerr != nil { - return fmt.Errorf("rename fallback failed: cannot rename %s to %s: %w", src, dst, cerr) - } - - if err := os.RemoveAll(src); err != nil { - return fmt.Errorf("cannot delete %s: %w", src, err) - } - - return nil -} - -var ( - errSrcNotDir = errors.New("source is not a directory") - errDstExist = errors.New("destination already exists") -) - -// CopyDir recursively copies a directory tree, attempting to preserve permissions. -// Source directory must exist, destination directory must *not* exist. -func CopyDir(src, dst string) error { - src = filepath.Clean(src) - dst = filepath.Clean(dst) - - // We use os.Lstat() here to ensure we don't fall in a loop where a symlink - // actually links to a one of its parent directories. 
- fi, err := os.Lstat(src) - if err != nil { - return err - } - if !fi.IsDir() { - return errSrcNotDir - } - - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - return errDstExist - } - - if err = os.MkdirAll(dst, fi.Mode()); err != nil { - return fmt.Errorf("cannot mkdir %s: %w", dst, err) - } - - entries, err := os.ReadDir(src) - if err != nil { - return fmt.Errorf("cannot read directory %s: %w", dst, err) - } - - for _, entry := range entries { - srcPath := filepath.Join(src, entry.Name()) - dstPath := filepath.Join(dst, entry.Name()) - - if entry.IsDir() { - if err = CopyDir(srcPath, dstPath); err != nil { - return fmt.Errorf("copying directory failed: %w", err) - } - } else { - // This will include symlinks, which is what we want when - // copying things. - if err = copyFile(srcPath, dstPath); err != nil { - return fmt.Errorf("copying file failed: %w", err) - } - } - } - - return nil -} - -// copyFile copies the contents of the file named src to the file named -// by dst. The file will be created if it does not already exist. If the -// destination file exists, all its contents will be replaced by the contents -// of the source file. The file mode will be copied from the source. -func copyFile(src, dst string) (err error) { - if sym, err := IsSymlink(src); err != nil { - return fmt.Errorf("symlink check failed: %w", err) - } else if sym { - if err := cloneSymlink(src, dst); err != nil { - if runtime.GOOS == "windows" { - // If cloning the symlink fails on Windows because the user - // does not have the required privileges, ignore the error and - // fall back to copying the file contents. 
- // - // ERROR_PRIVILEGE_NOT_HELD is 1314 (0x522): - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms681385(v=vs.85).aspx - if lerr, ok := err.(*os.LinkError); ok && lerr.Err != syscall.Errno(1314) { - return err - } - } else { - return err - } - } else { - return nil - } - } - - in, err := os.Open(src) - if err != nil { - return - } - defer in.Close() - - out, err := os.Create(dst) - if err != nil { - return - } - - if _, err = io.Copy(out, in); err != nil { - out.Close() - return - } - - // Check for write errors on Close - if err = out.Close(); err != nil { - return - } - - si, err := os.Stat(src) - if err != nil { - return - } - - // Temporary fix for Go < 1.9 - // - // See: https://github.com/golang/dep/issues/774 - // and https://github.com/golang/go/issues/20829 - if runtime.GOOS == "windows" { - dst = fixLongPath(dst) - } - err = os.Chmod(dst, si.Mode()) - - return -} - -// cloneSymlink will create a new symlink that points to the resolved path of sl. -// If sl is a relative symlink, dst will also be a relative symlink. -func cloneSymlink(sl, dst string) error { - resolved, err := os.Readlink(sl) - if err != nil { - return err - } - - return os.Symlink(resolved, dst) -} - -// IsDir determines is the path given is a directory or not. -func IsDir(name string) (bool, error) { - fi, err := os.Stat(name) - if err != nil { - return false, err - } - if !fi.IsDir() { - return false, fmt.Errorf("%q is not a directory", name) - } - return true, nil -} - -// IsSymlink determines if the given path is a symbolic link. -func IsSymlink(path string) (bool, error) { - l, err := os.Lstat(path) - if err != nil { - return false, err - } - - return l.Mode()&os.ModeSymlink == os.ModeSymlink, nil -} - -// fixLongPath returns the extended-length (\\?\-prefixed) form of -// path when needed, in order to avoid the default 260 character file -// path limit imposed by Windows. 
If path is not easily converted to -// the extended-length form (for example, if path is a relative path -// or contains .. elements), or is short enough, fixLongPath returns -// path unmodified. -// -// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath -func fixLongPath(path string) string { - // Do nothing (and don't allocate) if the path is "short". - // Empirically (at least on the Windows Server 2013 builder), - // the kernel is arbitrarily okay with < 248 bytes. That - // matches what the docs above say: - // "When using an API to create a directory, the specified - // path cannot be so long that you cannot append an 8.3 file - // name (that is, the directory name cannot exceed MAX_PATH - // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248. - // - // The MSDN docs appear to say that a normal path that is 248 bytes long - // will work; empirically the path must be less then 248 bytes long. - if len(path) < 248 { - // Don't fix. (This is how Go 1.7 and earlier worked, - // not automatically generating the \\?\ form) - return path - } - - // The extended form begins with \\?\, as in - // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt. - // The extended form disables evaluation of . and .. path - // elements and disables the interpretation of / as equivalent - // to \. The conversion here rewrites / to \ and elides - // . elements as well as trailing or duplicate separators. For - // simplicity it avoids the conversion entirely for relative - // paths or paths containing .. elements. For now, - // \\server\share paths are not converted to - // \\?\UNC\server\share paths because the rules for doing so - // are less well-specified. - if len(path) >= 2 && path[:2] == `\\` { - // Don't canonicalize UNC paths. 
- return path - } - if !isAbs(path) { - // Relative path - return path - } - - const prefix = `\\?` - - pathbuf := make([]byte, len(prefix)+len(path)+len(`\`)) - copy(pathbuf, prefix) - n := len(path) - r, w := 0, len(prefix) - for r < n { - switch { - case os.IsPathSeparator(path[r]): - // empty block - r++ - case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])): - // /./ - r++ - case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])): - // /../ is currently unhandled - return path - default: - pathbuf[w] = '\\' - w++ - for ; r < n && !os.IsPathSeparator(path[r]); r++ { - pathbuf[w] = path[r] - w++ - } - } - } - // A drive's root directory needs a trailing \ - if w == len(`\\?\c:`) { - pathbuf[w] = '\\' - w++ - } - return string(pathbuf[:w]) -} - -func isAbs(path string) (b bool) { - v := volumeName(path) - if v == "" { - return false - } - path = path[len(v):] - if path == "" { - return false - } - return os.IsPathSeparator(path[0]) -} - -func volumeName(path string) (v string) { - if len(path) < 2 { - return "" - } - // with drive letter - c := path[0] - if path[1] == ':' && - ('0' <= c && c <= '9' || 'a' <= c && c <= 'z' || - 'A' <= c && c <= 'Z') { - return path[:2] - } - // is it UNC - if l := len(path); l >= 5 && os.IsPathSeparator(path[0]) && os.IsPathSeparator(path[1]) && - !os.IsPathSeparator(path[2]) && path[2] != '.' { - // first, leading `\\` and next shouldn't be `\`. its server name. - for n := 3; n < l-1; n++ { - // second, next '\' shouldn't be repeated. - if os.IsPathSeparator(path[n]) { - n++ - // third, following something characters. its share name. - if !os.IsPathSeparator(path[n]) { - if path[n] == '.' 
{ - break - } - for ; n < l; n++ { - if os.IsPathSeparator(path[n]) { - break - } - } - return path[:n] - } - break - } - } - } - return "" -} diff --git a/internal/fs/fs_test.go b/internal/fs/fs_test.go deleted file mode 100644 index 9a1c5ef99..000000000 --- a/internal/fs/fs_test.go +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fs - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" - "runtime" - "sync" - "testing" -) - -var ( - mu sync.Mutex -) - -func TestRenameWithFallback(t *testing.T) { - dir := t.TempDir() - - if err := RenameWithFallback(filepath.Join(dir, "does_not_exists"), filepath.Join(dir, "dst")); err == nil { - t.Fatal("expected an error for non existing file, but got nil") - } - - srcpath := filepath.Join(dir, "src") - - if srcf, err := os.Create(srcpath); err != nil { - t.Fatal(err) - } else { - srcf.Close() - } - - if err := RenameWithFallback(srcpath, filepath.Join(dir, "dst")); err != nil { - t.Fatal(err) - } - - srcpath = filepath.Join(dir, "a") - if err := os.MkdirAll(srcpath, 0o770); err != nil { - t.Fatal(err) - } - - dstpath := filepath.Join(dir, "b") - if err := os.MkdirAll(dstpath, 0o770); err != nil { - t.Fatal(err) - } - - if err := RenameWithFallback(srcpath, dstpath); err == nil { - t.Fatal("expected an error if dst is an existing directory, but got nil") - } -} - -func TestCopyDir(t *testing.T) { - dir := t.TempDir() - - srcdir := filepath.Join(dir, "src") - if err := os.MkdirAll(srcdir, 0o750); err != nil { - t.Fatal(err) - } - - files := []struct { - path string - contents string - fi os.FileInfo - }{ - {path: "myfile", contents: "hello world"}, - {path: filepath.Join("subdir", "file"), contents: "subdir file"}, - } - - // Create structure indicated in 'files' - for i, file := range files { - fn := filepath.Join(srcdir, file.path) - dn := filepath.Dir(fn) - if err := 
os.MkdirAll(dn, 0o750); err != nil { - t.Fatal(err) - } - - fh, err := os.Create(fn) - if err != nil { - t.Fatal(err) - } - - if _, err = fh.Write([]byte(file.contents)); err != nil { - t.Fatal(err) - } - fh.Close() - - files[i].fi, err = os.Stat(fn) - if err != nil { - t.Fatal(err) - } - } - - destdir := filepath.Join(dir, "dest") - if err := CopyDir(srcdir, destdir); err != nil { - t.Fatal(err) - } - - // Compare copy against structure indicated in 'files' - for _, file := range files { - fn := filepath.Join(srcdir, file.path) - dn := filepath.Dir(fn) - dirOK, err := IsDir(dn) - if err != nil { - t.Fatal(err) - } - if !dirOK { - t.Fatalf("expected %s to be a directory", dn) - } - - got, err := os.ReadFile(fn) - if err != nil { - t.Fatal(err) - } - - if file.contents != string(got) { - t.Fatalf("expected: %s, got: %s", file.contents, string(got)) - } - - gotinfo, err := os.Stat(fn) - if err != nil { - t.Fatal(err) - } - - if file.fi.Mode() != gotinfo.Mode() { - t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v", - file.path, file.fi.Mode(), fn, gotinfo.Mode()) - } - } -} - -func TestCopyDirFail_SrcInaccessible(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. - t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - setupInaccessibleDir(t, func(dir string) error { - srcdir = filepath.Join(dir, "src") - return os.MkdirAll(srcdir, 0o750) - }) - - dir := t.TempDir() - - dstdir = filepath.Join(dir, "dst") - if err := CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyDirFail_DstInaccessible(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. 
- t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - dir := t.TempDir() - - srcdir = filepath.Join(dir, "src") - if err := os.MkdirAll(srcdir, 0o750); err != nil { - t.Fatal(err) - } - - setupInaccessibleDir(t, func(dir string) error { - dstdir = filepath.Join(dir, "dst") - return nil - }) - - if err := CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyDirFail_SrcIsNotDir(t *testing.T) { - var srcdir, dstdir string - - dir := t.TempDir() - - srcdir = filepath.Join(dir, "src") - if _, err := os.Create(srcdir); err != nil { - t.Fatal(err) - } - - dstdir = filepath.Join(dir, "dst") - - err := CopyDir(srcdir, dstdir) - if err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } - - if err != errSrcNotDir { - t.Fatalf("expected %v error for CopyDir(%s, %s), got %s", errSrcNotDir, srcdir, dstdir, err) - } - -} - -func TestCopyDirFail_DstExists(t *testing.T) { - var srcdir, dstdir string - - dir := t.TempDir() - - srcdir = filepath.Join(dir, "src") - if err := os.MkdirAll(srcdir, 0o750); err != nil { - t.Fatal(err) - } - - dstdir = filepath.Join(dir, "dst") - if err := os.MkdirAll(dstdir, 0o750); err != nil { - t.Fatal(err) - } - - err := CopyDir(srcdir, dstdir) - if err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } - - if err != errDstExist { - t.Fatalf("expected %v error for CopyDir(%s, %s), got %s", errDstExist, srcdir, dstdir, err) - } -} - -func TestCopyDirFailOpen(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. os.Chmod(..., 0o222) below is not - // enough for the file to be readonly, and os.Chmod(..., - // 0000) returns an invalid argument error. Skipping - // this this until a compatible implementation is - // provided. 
- t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - dir := t.TempDir() - - srcdir = filepath.Join(dir, "src") - if err := os.MkdirAll(srcdir, 0o750); err != nil { - t.Fatal(err) - } - - srcfn := filepath.Join(srcdir, "file") - srcf, err := os.Create(srcfn) - if err != nil { - t.Fatal(err) - } - srcf.Close() - - // setup source file so that it cannot be read - if err = os.Chmod(srcfn, 0o220); err != nil { - t.Fatal(err) - } - - dstdir = filepath.Join(dir, "dst") - - if err = CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyFile(t *testing.T) { - dir := t.TempDir() - - srcf, err := os.Create(filepath.Join(dir, "srcfile")) - if err != nil { - t.Fatal(err) - } - - want := "hello world" - if _, err := srcf.Write([]byte(want)); err != nil { - t.Fatal(err) - } - srcf.Close() - - destf := filepath.Join(dir, "destf") - if err := copyFile(srcf.Name(), destf); err != nil { - t.Fatal(err) - } - - got, err := os.ReadFile(destf) - if err != nil { - t.Fatal(err) - } - - if want != string(got) { - t.Fatalf("expected: %s, got: %s", want, string(got)) - } - - wantinfo, err := os.Stat(srcf.Name()) - if err != nil { - t.Fatal(err) - } - - gotinfo, err := os.Stat(destf) - if err != nil { - t.Fatal(err) - } - - if wantinfo.Mode() != gotinfo.Mode() { - t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v", srcf.Name(), wantinfo.Mode(), destf, gotinfo.Mode()) - } -} - -func TestCopyFileSymlink(t *testing.T) { - dir := t.TempDir() - defer cleanUpDir(dir) - - testcases := map[string]string{ - filepath.Join("./testdata/symlinks/file-symlink"): filepath.Join(dir, "dst-file"), - filepath.Join("./testdata/symlinks/windows-file-symlink"): filepath.Join(dir, "windows-dst-file"), - filepath.Join("./testdata/symlinks/invalid-symlink"): filepath.Join(dir, "invalid-symlink"), - } - - for symlink, dst := range testcases { - t.Run(symlink, func(t *testing.T) { - var err error - if err = 
copyFile(symlink, dst); err != nil { - t.Fatalf("failed to copy symlink: %s", err) - } - - var want, got string - - if runtime.GOOS == "windows" { - // Creating symlinks on Windows require an additional permission - // regular users aren't granted usually. So we copy the file - // content as a fall back instead of creating a real symlink. - srcb, err := os.ReadFile(symlink) - if err != nil { - t.Fatalf("%+v", err) - } - dstb, err := os.ReadFile(dst) - if err != nil { - t.Fatalf("%+v", err) - } - - want = string(srcb) - got = string(dstb) - } else { - want, err = os.Readlink(symlink) - if err != nil { - t.Fatalf("%+v", err) - } - - got, err = os.Readlink(dst) - if err != nil { - t.Fatalf("could not resolve symlink: %s", err) - } - } - - if want != got { - t.Fatalf("resolved path is incorrect. expected %s, got %s", want, got) - } - }) - } -} - -func TestCopyFileLongFilePath(t *testing.T) { - if runtime.GOOS != "windows" { - // We want to ensure the temporary fix actually fixes the issue with - // os.Chmod and long file paths. This is only applicable on Windows. - t.Skip("skipping on non-windows") - } - - dir := t.TempDir() - - // Create a directory with a long-enough path name to cause the bug in #774. 
- dirName := "" - for len(dir+string(os.PathSeparator)+dirName) <= 300 { - dirName += "directory" - } - - fullPath := filepath.Join(dir, dirName, string(os.PathSeparator)) - if err := os.MkdirAll(fullPath, 0o750); err != nil && !os.IsExist(err) { - t.Fatalf("%+v", fmt.Errorf("unable to create temp directory: %s", fullPath)) - } - - err := os.WriteFile(fullPath+"src", []byte(nil), 0o640) - if err != nil { - t.Fatalf("%+v", err) - } - - err = copyFile(fullPath+"src", fullPath+"dst") - if err != nil { - t.Fatalf("unexpected error while copying file: %v", err) - } -} - -// C:\Users\appveyor\AppData\Local\Temp\1\gotest639065787\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890 - -func TestCopyFileFail(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. - t.Skip("skipping on windows") - } - - dir := t.TempDir() - - srcf, err := os.Create(filepath.Join(dir, "srcfile")) - if err != nil { - t.Fatal(err) - } - srcf.Close() - - var dstdir string - - setupInaccessibleDir(t, func(dir string) error { - dstdir = filepath.Join(dir, "dir") - return os.Mkdir(dstdir, 0o770) - }) - - fn := filepath.Join(dstdir, "file") - if err := copyFile(srcf.Name(), fn); err == nil { - t.Fatalf("expected error for %s, got none", fn) - } -} - -// setupInaccessibleDir creates a temporary location with a single -// directory in it, in such a way that that directory is not accessible -// after this function returns. -// -// op is called with the directory as argument, so that it can create -// files or other test artifacts. -// -// If setupInaccessibleDir fails in its preparation, or op fails, t.Fatal -// will be invoked. 
-func setupInaccessibleDir(t *testing.T, op func(dir string) error) { - dir, err := os.MkdirTemp("", "dep") - if err != nil { - t.Fatal(err) - } - - subdir := filepath.Join(dir, "dir") - - t.Cleanup(func() { - if err := os.Chmod(subdir, 0o770); err != nil { - t.Error(err) - } - }) - - if err := os.Mkdir(subdir, 0o770); err != nil { - t.Fatal(err) - } - - if err := op(subdir); err != nil { - t.Fatal(err) - } - - if err := os.Chmod(subdir, 0o660); err != nil { - t.Fatal(err) - } -} - -func TestIsDir(t *testing.T) { - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - var dn string - - setupInaccessibleDir(t, func(dir string) error { - dn = filepath.Join(dir, "dir") - return os.Mkdir(dn, 0o770) - }) - - tests := map[string]struct { - exists bool - err bool - }{ - wd: {true, false}, - filepath.Join(wd, "testdata"): {true, false}, - filepath.Join(wd, "main.go"): {false, true}, - filepath.Join(wd, "this_file_does_not_exist.thing"): {false, true}, - dn: {false, true}, - } - - if runtime.GOOS == "windows" { - // This test doesn't work on Microsoft Windows because - // of the differences in how file permissions are - // implemented. For this to work, the directory where - // the directory exists should be inaccessible. 
- delete(tests, dn) - } - - for f, want := range tests { - got, err := IsDir(f) - if err != nil && !want.err { - t.Fatalf("expected no error, got %v", err) - } - - if got != want.exists { - t.Fatalf("expected %t for %s, got %t", want.exists, f, got) - } - } -} - -func TestIsSymlink(t *testing.T) { - dir := t.TempDir() - - dirPath := filepath.Join(dir, "directory") - if err := os.MkdirAll(dirPath, 0o770); err != nil { - t.Fatal(err) - } - - filePath := filepath.Join(dir, "file") - f, err := os.Create(filePath) - if err != nil { - t.Fatal(err) - } - f.Close() - - dirSymlink := filepath.Join(dir, "dirSymlink") - fileSymlink := filepath.Join(dir, "fileSymlink") - - if err = os.Symlink(dirPath, dirSymlink); err != nil { - t.Fatal(err) - } - if err = os.Symlink(filePath, fileSymlink); err != nil { - t.Fatal(err) - } - - var ( - inaccessibleFile string - inaccessibleSymlink string - ) - - setupInaccessibleDir(t, func(dir string) error { - inaccessibleFile = filepath.Join(dir, "file") - if fh, err := os.Create(inaccessibleFile); err != nil { - return err - } else if err = fh.Close(); err != nil { - return err - } - - inaccessibleSymlink = filepath.Join(dir, "symlink") - return os.Symlink(inaccessibleFile, inaccessibleSymlink) - }) - - tests := map[string]struct{ expected, err bool }{ - dirPath: {false, false}, - filePath: {false, false}, - dirSymlink: {true, false}, - fileSymlink: {true, false}, - inaccessibleFile: {false, true}, - inaccessibleSymlink: {false, true}, - } - - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in Windows. Skipping - // these cases until a compatible implementation is provided. 
- delete(tests, inaccessibleFile) - delete(tests, inaccessibleSymlink) - } - - for path, want := range tests { - got, err := IsSymlink(path) - if err != nil { - if !want.err { - t.Errorf("expected no error, got %v", err) - } - } - - if got != want.expected { - t.Errorf("expected %t for %s, got %t", want.expected, path, got) - } - } -} - -func cleanUpDir(dir string) { - if runtime.GOOS == "windows" { - mu.Lock() - exec.Command(`taskkill`, `/F`, `/IM`, `git.exe`).Run() - mu.Unlock() - } - if dir != "" { - os.RemoveAll(dir) - } -} diff --git a/internal/fs/rename.go b/internal/fs/rename.go deleted file mode 100644 index bad1f4778..000000000 --- a/internal/fs/rename.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !windows -// +build !windows - -package fs - -import ( - "fmt" - "os" - "syscall" -) - -// renameFallback attempts to determine the appropriate fallback to failed rename -// operation depending on the resulting error. -func renameFallback(err error, src, dst string) error { - // Rename may fail if src and dst are on different devices; fall back to - // copy if we detect that case. syscall.EXDEV is the common name for the - // cross device link error which has varying output text across different - // operating systems. - terr, ok := err.(*os.LinkError) - if !ok { - return err - } else if terr.Err != syscall.EXDEV { - return fmt.Errorf("link error: cannot rename %s to %s: %w", src, dst, terr) - } - - return renameByCopy(src, dst) -} diff --git a/internal/fs/rename_windows.go b/internal/fs/rename_windows.go deleted file mode 100644 index fa9a0b4d9..000000000 --- a/internal/fs/rename_windows.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build windows -// +build windows - -package fs - -import ( - "fmt" - "os" - "syscall" -) - -// renameFallback attempts to determine the appropriate fallback to failed rename -// operation depending on the resulting error. -func renameFallback(err error, src, dst string) error { - // Rename may fail if src and dst are on different devices; fall back to - // copy if we detect that case. syscall.EXDEV is the common name for the - // cross device link error which has varying output text across different - // operating systems. - terr, ok := err.(*os.LinkError) - if !ok { - return err - } - - if terr.Err != syscall.EXDEV { - // In windows it can drop down to an operating system call that - // returns an operating system error with a different number and - // message. Checking for that as a fall back. - noerr, ok := terr.Err.(syscall.Errno) - - // 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error. - // See https://msdn.microsoft.com/en-us/library/cc231199.aspx - if ok && noerr != 0x11 { - return fmt.Errorf("link error: cannot rename %s to %s: %w", src, dst, terr) - } - } - - return renameByCopy(src, dst) -} diff --git a/internal/fs/testdata/symlinks/dir-symlink b/internal/fs/testdata/symlinks/dir-symlink deleted file mode 120000 index 777ebd014..000000000 --- a/internal/fs/testdata/symlinks/dir-symlink +++ /dev/null @@ -1 +0,0 @@ -../../testdata \ No newline at end of file diff --git a/internal/fs/testdata/symlinks/file-symlink b/internal/fs/testdata/symlinks/file-symlink deleted file mode 120000 index 4c52274de..000000000 --- a/internal/fs/testdata/symlinks/file-symlink +++ /dev/null @@ -1 +0,0 @@ -../test.file \ No newline at end of file diff --git a/internal/fs/testdata/symlinks/invalid-symlink b/internal/fs/testdata/symlinks/invalid-symlink deleted file mode 120000 index 0edf4f301..000000000 --- a/internal/fs/testdata/symlinks/invalid-symlink +++ /dev/null @@ -1 +0,0 @@ -/non/existing/file \ No newline at end of file diff --git 
a/internal/fs/testdata/symlinks/windows-file-symlink b/internal/fs/testdata/symlinks/windows-file-symlink deleted file mode 120000 index af1d6c8f5..000000000 --- a/internal/fs/testdata/symlinks/windows-file-symlink +++ /dev/null @@ -1 +0,0 @@ -C:/Users/ibrahim/go/src/github.com/golang/dep/internal/fs/testdata/test.file \ No newline at end of file diff --git a/internal/fs/testdata/test.file b/internal/fs/testdata/test.file deleted file mode 100644 index e69de29bb..000000000 diff --git a/internal/helm/chart/builder.go b/internal/helm/chart/builder.go index b5ac93825..6ac896e78 100644 --- a/internal/helm/chart/builder.go +++ b/internal/helm/chart/builder.go @@ -24,10 +24,11 @@ import ( "regexp" "strings" + sourcefs "github.com/fluxcd/pkg/oci" helmchart "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/chartutil" - "github.com/fluxcd/source-controller/internal/fs" + "github.com/fluxcd/source-controller/internal/oci" ) // Reference holds information to locate a chart. @@ -106,6 +107,12 @@ type BuildOptions struct { // ValuesFiles can be set to a list of relative paths, used to compose // and overwrite an alternative default "values.yaml" for the chart. ValuesFiles []string + // CachedChartValuesFiles is a list of relative paths that were used to + // build the cached chart. + CachedChartValuesFiles []string + // IgnoreMissingValuesFiles controls whether to silently ignore missing + // values files rather than failing. + IgnoreMissingValuesFiles bool // CachedChart can be set to the absolute path of a chart stored on // the local filesystem, and is used for simple validation by metadata // comparisons. @@ -146,6 +153,9 @@ type Build struct { // This can for example be false if ValuesFiles is empty and the chart // source was already packaged. Packaged bool + // VerifiedResult indicates the results of verifying the chart. + // If no verification was performed, this field should be VerificationResultIgnored. 
+ VerifiedResult oci.VerificationResult } // Summary returns a human-readable summary of the Build. @@ -199,11 +209,6 @@ func (b *Build) String() string { // packageToPath attempts to package the given chart to the out filepath. func packageToPath(chart *helmchart.Chart, out string) error { - // Names cannot have directory name characters. - if chart.Name() != filepath.Base(chart.Name()) { - return fmt.Errorf("%q is not a valid chart name", chart.Name()) - } - o, err := os.MkdirTemp("", "chart-build-*") if err != nil { return fmt.Errorf("failed to create temporary directory for chart: %w", err) @@ -214,7 +219,7 @@ func packageToPath(chart *helmchart.Chart, out string) error { if err != nil { return fmt.Errorf("failed to package chart: %w", err) } - if err = fs.RenameWithFallback(p, out); err != nil { + if err = sourcefs.RenameWithFallback(p, out); err != nil { return fmt.Errorf("failed to write chart to file: %w", err) } return nil diff --git a/internal/helm/chart/builder_local.go b/internal/helm/chart/builder_local.go index 0e0b20c28..44399a80a 100644 --- a/internal/helm/chart/builder_local.go +++ b/internal/helm/chart/builder_local.go @@ -121,6 +121,11 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, if result.Name == curMeta.Name && result.Version == curMeta.Version { result.Path = opts.CachedChart result.ValuesFiles = opts.GetValuesFiles() + if opts.CachedChartValuesFiles != nil { + // If the cached chart values files are set, we should use them + // instead of reporting the values files. 
+ result.ValuesFiles = opts.CachedChartValuesFiles + } result.Packaged = requiresPackaging return result, nil @@ -140,9 +145,12 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, } // Merge chart values, if instructed - var mergedValues map[string]interface{} + var ( + mergedValues map[string]interface{} + valuesFiles []string + ) if len(opts.GetValuesFiles()) > 0 { - if mergedValues, err = mergeFileValues(localRef.WorkDir, opts.ValuesFiles); err != nil { + if mergedValues, valuesFiles, err = mergeFileValues(localRef.WorkDir, opts.ValuesFiles, opts.IgnoreMissingValuesFiles); err != nil { return result, &BuildError{Reason: ErrValuesFilesMerge, Err: err} } } @@ -163,7 +171,7 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, if err != nil { return result, &BuildError{Reason: ErrValuesFilesMerge, Err: err} } - result.ValuesFiles = opts.GetValuesFiles() + result.ValuesFiles = valuesFiles } // Ensure dependencies are fetched if building from a directory @@ -187,31 +195,42 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, } // mergeFileValues merges the given value file paths into a single "values.yaml" map. -// The provided (relative) paths may not traverse outside baseDir. It returns the merge -// result, or an error. -func mergeFileValues(baseDir string, paths []string) (map[string]interface{}, error) { +// The provided (relative) paths may not traverse outside baseDir. By default, a missing +// file is considered an error. If ignoreMissing is true, missing files are ignored. +// It returns the merge result and the list of files that contributed to that result, +// or an error. 
+func mergeFileValues(baseDir string, paths []string, ignoreMissing bool) (map[string]interface{}, []string, error) { mergedValues := make(map[string]interface{}) + valuesFiles := make([]string, 0, len(paths)) for _, p := range paths { secureP, err := securejoin.SecureJoin(baseDir, p) if err != nil { - return nil, err + return nil, nil, err } - if f, err := os.Stat(secureP); err != nil || !f.Mode().IsRegular() { - return nil, fmt.Errorf("no values file found at path '%s' (reference '%s')", + f, err := os.Stat(secureP) + switch { + case err != nil: + if ignoreMissing && os.IsNotExist(err) { + continue + } + fallthrough + case !f.Mode().IsRegular(): + return nil, nil, fmt.Errorf("no values file found at path '%s' (reference '%s')", strings.TrimPrefix(secureP, baseDir), p) } b, err := os.ReadFile(secureP) if err != nil { - return nil, fmt.Errorf("could not read values from file '%s': %w", p, err) + return nil, nil, fmt.Errorf("could not read values from file '%s': %w", p, err) } values := make(map[string]interface{}) err = yaml.Unmarshal(b, &values) if err != nil { - return nil, fmt.Errorf("unmarshaling values from '%s' failed: %w", p, err) + return nil, nil, fmt.Errorf("unmarshaling values from '%s' failed: %w", p, err) } mergedValues = transform.MergeMaps(mergedValues, values) + valuesFiles = append(valuesFiles, p) } - return mergedValues, nil + return mergedValues, valuesFiles, nil } // copyFileToPath attempts to copy in to out. It returns an error if out already exists. 
diff --git a/internal/helm/chart/builder_local_test.go b/internal/helm/chart/builder_local_test.go index 626dc072e..4b26e1419 100644 --- a/internal/helm/chart/builder_local_test.go +++ b/internal/helm/chart/builder_local_test.go @@ -93,7 +93,7 @@ func TestLocalBuilder_Build(t *testing.T) { name: "invalid version metadata", reference: LocalReference{Path: "../testdata/charts/helmchart"}, buildOpts: BuildOptions{VersionMetadata: "^"}, - wantErr: "Invalid Metadata string", + wantErr: "invalid metadata string", }, { name: "with version metadata", @@ -281,11 +281,13 @@ func TestLocalBuilder_Build_CachedChart(t *testing.T) { func Test_mergeFileValues(t *testing.T) { tests := []struct { - name string - files []*helmchart.File - paths []string - want map[string]interface{} - wantErr string + name string + files []*helmchart.File + paths []string + ignoreMissing bool + wantValues map[string]interface{} + wantFiles []string + wantErr string }{ { name: "merges values from files", @@ -295,10 +297,11 @@ func Test_mergeFileValues(t *testing.T) { {Name: "c.yaml", Data: []byte("b: d")}, }, paths: []string{"a.yaml", "b.yaml", "c.yaml"}, - want: map[string]interface{}{ + wantValues: map[string]interface{}{ "a": "b", "b": "d", }, + wantFiles: []string{"a.yaml", "b.yaml", "c.yaml"}, }, { name: "illegal traverse", @@ -318,6 +321,25 @@ func Test_mergeFileValues(t *testing.T) { paths: []string{"a.yaml"}, wantErr: "no values file found at path '/a.yaml'", }, + { + name: "ignore missing files", + files: []*helmchart.File{ + {Name: "a.yaml", Data: []byte("a: b")}, + }, + paths: []string{"a.yaml", "b.yaml"}, + ignoreMissing: true, + wantValues: map[string]interface{}{ + "a": "b", + }, + wantFiles: []string{"a.yaml"}, + }, + { + name: "all files missing", + paths: []string{"a.yaml"}, + ignoreMissing: true, + wantValues: map[string]interface{}{}, + wantFiles: []string{}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -329,16 +351,18 @@ func Test_mergeFileValues(t 
*testing.T) { g.Expect(os.WriteFile(filepath.Join(baseDir, f.Name), f.Data, 0o640)).To(Succeed()) } - got, err := mergeFileValues(baseDir, tt.paths) + gotValues, gotFiles, err := mergeFileValues(baseDir, tt.paths, tt.ignoreMissing) if tt.wantErr != "" { g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) - g.Expect(got).To(BeNil()) + g.Expect(gotValues).To(BeNil()) + g.Expect(gotFiles).To(BeNil()) return } g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).To(Equal(tt.want)) + g.Expect(gotValues).To(Equal(tt.wantValues)) + g.Expect(gotFiles).To(Equal(tt.wantFiles)) }) } } diff --git a/internal/helm/chart/builder_remote.go b/internal/helm/chart/builder_remote.go index 5ecfe9873..2cfdf81b4 100644 --- a/internal/helm/chart/builder_remote.go +++ b/internal/helm/chart/builder_remote.go @@ -30,11 +30,12 @@ import ( "helm.sh/helm/v3/pkg/repo" "sigs.k8s.io/yaml" + sourcefs "github.com/fluxcd/pkg/oci" "github.com/fluxcd/pkg/runtime/transform" - "github.com/fluxcd/source-controller/internal/fs" "github.com/fluxcd/source-controller/internal/helm/chart/secureloader" "github.com/fluxcd/source-controller/internal/helm/repository" + "github.com/fluxcd/source-controller/internal/oci" ) type remoteChartBuilder struct { @@ -102,7 +103,7 @@ func (b *remoteChartBuilder) Build(ctx context.Context, ref Reference, p string, } chart.Metadata.Version = result.Version - mergedValues, err := mergeChartValues(chart, opts.ValuesFiles) + mergedValues, valuesFiles, err := mergeChartValues(chart, opts.ValuesFiles, opts.IgnoreMissingValuesFiles) if err != nil { err = fmt.Errorf("failed to merge chart values: %w", err) return result, &BuildError{Reason: ErrValuesFilesMerge, Err: err} @@ -112,7 +113,7 @@ func (b *remoteChartBuilder) Build(ctx context.Context, ref Reference, p string, if err != nil { return nil, &BuildError{Reason: ErrValuesFilesMerge, Err: err} } - 
result.ValuesFiles = opts.GetValuesFiles() + result.ValuesFiles = valuesFiles } // Package the chart with the custom values @@ -141,9 +142,11 @@ func (b *remoteChartBuilder) downloadFromRepository(ctx context.Context, remote return nil, nil, &BuildError{Reason: reason, Err: err} } + verifiedResult := oci.VerificationResultIgnored + // Verify the chart if necessary if opts.Verify { - if err := remote.VerifyChart(ctx, cv); err != nil { + if verifiedResult, err = remote.VerifyChart(ctx, cv); err != nil { return nil, nil, &BuildError{Reason: ErrChartVerification, Err: err} } } @@ -153,6 +156,8 @@ func (b *remoteChartBuilder) downloadFromRepository(ctx context.Context, remote return nil, nil, err } + result.VerifiedResult = verifiedResult + if shouldReturn { return nil, result, nil } @@ -173,6 +178,7 @@ func generateBuildResult(cv *repo.ChartVersion, opts BuildOptions) (*Build, bool result := &Build{} result.Version = cv.Version result.Name = cv.Name + result.VerifiedResult = oci.VerificationResultIgnored // Set build specific metadata if instructed if opts.VersionMetadata != "" { @@ -197,6 +203,11 @@ func generateBuildResult(cv *repo.ChartVersion, opts BuildOptions) (*Build, bool if result.Name == curMeta.Name && result.Version == curMeta.Version { result.Path = opts.CachedChart result.ValuesFiles = opts.GetValuesFiles() + if opts.CachedChartValuesFiles != nil { + // If the cached chart values files are set, we should use them + // instead of reporting the values files. + result.ValuesFiles = opts.CachedChartValuesFiles + } result.Packaged = requiresPackaging return result, true, nil } @@ -220,13 +231,18 @@ func setBuildMetaData(version, versionMetadata string) (*semver.Version, error) } // mergeChartValues merges the given chart.Chart Files paths into a single "values.yaml" map. -// It returns the merge result, or an error. 
-func mergeChartValues(chart *helmchart.Chart, paths []string) (map[string]interface{}, error) { +// By default, a missing file is considered an error. If ignoreMissing is set true, +// missing files are ignored. +// It returns the merge result and the list of files that contributed to that result, +// or an error. +func mergeChartValues(chart *helmchart.Chart, paths []string, ignoreMissing bool) (map[string]interface{}, []string, error) { mergedValues := make(map[string]interface{}) + valuesFiles := make([]string, 0, len(paths)) for _, p := range paths { cfn := filepath.Clean(p) if cfn == chartutil.ValuesfileName { mergedValues = transform.MergeMaps(mergedValues, chart.Values) + valuesFiles = append(valuesFiles, p) continue } var b []byte @@ -237,15 +253,19 @@ func mergeChartValues(chart *helmchart.Chart, paths []string) (map[string]interf } } if b == nil { - return nil, fmt.Errorf("no values file found at path '%s'", p) + if ignoreMissing { + continue + } + return nil, nil, fmt.Errorf("no values file found at path '%s'", p) } values := make(map[string]interface{}) if err := yaml.Unmarshal(b, &values); err != nil { - return nil, fmt.Errorf("unmarshaling values from '%s' failed: %w", p, err) + return nil, nil, fmt.Errorf("unmarshaling values from '%s' failed: %w", p, err) } mergedValues = transform.MergeMaps(mergedValues, values) + valuesFiles = append(valuesFiles, p) } - return mergedValues, nil + return mergedValues, valuesFiles, nil } // validatePackageAndWriteToPath atomically writes the packaged chart from reader @@ -270,7 +290,7 @@ func validatePackageAndWriteToPath(reader io.Reader, out string) error { if err = meta.Validate(); err != nil { return fmt.Errorf("failed to validate metadata of written chart: %w", err) } - if err = fs.RenameWithFallback(tmpFile.Name(), out); err != nil { + if err = sourcefs.RenameWithFallback(tmpFile.Name(), out); err != nil { return fmt.Errorf("failed to write chart to file: %w", err) } return nil diff --git 
a/internal/helm/chart/builder_remote_test.go b/internal/helm/chart/builder_remote_test.go index fa4fcf3ef..7994fa5ee 100644 --- a/internal/helm/chart/builder_remote_test.go +++ b/internal/helm/chart/builder_remote_test.go @@ -99,6 +99,7 @@ entries: - https://example.com/grafana.tgz description: string version: 6.17.4 + name: grafana `) mockGetter := &mockIndexChartGetter{ @@ -151,7 +152,7 @@ entries: reference: RemoteReference{Name: "grafana"}, repository: mockRepo(), buildOpts: BuildOptions{VersionMetadata: "^"}, - wantErr: "Invalid Metadata string", + wantErr: "invalid metadata string", }, { name: "with version metadata", @@ -299,7 +300,7 @@ func TestRemoteBuilder_BuildFromOCIChartRepository(t *testing.T) { reference: RemoteReference{Name: "grafana"}, repository: mockRepo(), buildOpts: BuildOptions{VersionMetadata: "^"}, - wantErr: "Invalid Metadata string", + wantErr: "invalid metadata string", }, { name: "with version metadata", @@ -443,11 +444,13 @@ entries: func Test_mergeChartValues(t *testing.T) { tests := []struct { - name string - chart *helmchart.Chart - paths []string - want map[string]interface{} - wantErr string + name string + chart *helmchart.Chart + paths []string + ignoreMissing bool + wantValues map[string]interface{} + wantFiles []string + wantErr string }{ { name: "merges values", @@ -459,10 +462,11 @@ func Test_mergeChartValues(t *testing.T) { }, }, paths: []string{"a.yaml", "b.yaml", "c.yaml"}, - want: map[string]interface{}{ + wantValues: map[string]interface{}{ "a": "b", "b": "d", }, + wantFiles: []string{"a.yaml", "b.yaml", "c.yaml"}, }, { name: "uses chart values", @@ -475,10 +479,11 @@ func Test_mergeChartValues(t *testing.T) { }, }, paths: []string{chartutil.ValuesfileName, "c.yaml"}, - want: map[string]interface{}{ + wantValues: map[string]interface{}{ "a": "b", "b": "d", }, + wantFiles: []string{chartutil.ValuesfileName, "c.yaml"}, }, { name: "unmarshal error", @@ -496,21 +501,59 @@ func Test_mergeChartValues(t *testing.T) { paths: 
[]string{"a.yaml"}, wantErr: "no values file found at path 'a.yaml'", }, + { + name: "merges values ignoring file missing", + chart: &helmchart.Chart{ + Files: []*helmchart.File{ + {Name: "a.yaml", Data: []byte("a: b")}, + }, + }, + paths: []string{"a.yaml", "b.yaml"}, + ignoreMissing: true, + wantValues: map[string]interface{}{ + "a": "b", + }, + wantFiles: []string{"a.yaml"}, + }, + { + name: "merges values ignoring all missing", + chart: &helmchart.Chart{}, + paths: []string{"a.yaml"}, + ignoreMissing: true, + wantValues: map[string]interface{}{}, + wantFiles: []string{}, + }, + { + name: "uses chart values ignoring missing file", + chart: &helmchart.Chart{ + Values: map[string]interface{}{ + "a": "b", + }, + }, + paths: []string{chartutil.ValuesfileName, "c.yaml"}, + ignoreMissing: true, + wantValues: map[string]interface{}{ + "a": "b", + }, + wantFiles: []string{chartutil.ValuesfileName}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - got, err := mergeChartValues(tt.chart, tt.paths) + gotValues, gotFiles, err := mergeChartValues(tt.chart, tt.paths, tt.ignoreMissing) if tt.wantErr != "" { g.Expect(err).To(HaveOccurred()) g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) - g.Expect(got).To(BeNil()) + g.Expect(gotValues).To(BeNil()) + g.Expect(gotFiles).To(BeNil()) return } g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).To(Equal(tt.want)) + g.Expect(gotValues).To(Equal(tt.wantValues)) + g.Expect(gotFiles).To(Equal(tt.wantFiles)) }) } } diff --git a/internal/helm/chart/builder_test.go b/internal/helm/chart/builder_test.go index be348b552..d3fa55e38 100644 --- a/internal/helm/chart/builder_test.go +++ b/internal/helm/chart/builder_test.go @@ -255,14 +255,6 @@ func Test_packageToPath(t *testing.T) { g.Expect(out).To(BeARegularFile()) _, err = secureloader.LoadFile(out) g.Expect(err).ToNot(HaveOccurred()) - - chart, err = secureloader.LoadFile("../testdata/charts/helmchart-badname-0.1.0.tgz") - 
g.Expect(err).ToNot(HaveOccurred()) - g.Expect(chart).ToNot(BeNil()) - - out2 := tmpFile("chart-badname-0.1.0", ".tgz") - err = packageToPath(chart, out2) - g.Expect(err).To(HaveOccurred()) } func tmpFile(prefix, suffix string) string { diff --git a/internal/helm/chart/dependency_manager.go b/internal/helm/chart/dependency_manager.go index 97b1534a4..8a3f0ccfb 100644 --- a/internal/helm/chart/dependency_manager.go +++ b/internal/helm/chart/dependency_manager.go @@ -296,6 +296,9 @@ func (dm *DependencyManager) resolveRepository(url string) (repo repository.Down // It does not allow the dependency's path to be outside the scope of // LocalReference.WorkDir. func (dm *DependencyManager) secureLocalChartPath(ref LocalReference, dep *helmchart.Dependency) (string, error) { + if dep.Repository == "" { + return securejoin.SecureJoin(ref.WorkDir, filepath.Join(ref.Path, "charts", dep.Name)) + } localUrl, err := url.Parse(dep.Repository) if err != nil { return "", fmt.Errorf("failed to parse alleged local chart reference: %w", err) diff --git a/internal/helm/chart/dependency_manager_test.go b/internal/helm/chart/dependency_manager_test.go index fcd7015a7..241959fbe 100644 --- a/internal/helm/chart/dependency_manager_test.go +++ b/internal/helm/chart/dependency_manager_test.go @@ -290,13 +290,15 @@ func TestDependencyManager_build(t *testing.T) { func TestDependencyManager_addLocalDependency(t *testing.T) { tests := []struct { - name string - dep *helmchart.Dependency - wantErr string - wantFunc func(g *WithT, c *helmchart.Chart) + name string + chartName string + dep *helmchart.Dependency + wantErr string + wantFunc func(g *WithT, c *helmchart.Chart) }{ { - name: "local dependency", + name: "local dependency", + chartName: "helmchartwithdeps", dep: &helmchart.Dependency{ Name: chartName, Version: chartVersion, @@ -307,7 +309,8 @@ func TestDependencyManager_addLocalDependency(t *testing.T) { }, }, { - name: "version not matching constraint", + name: "version not matching 
constraint", + chartName: "helmchartwithdeps", dep: &helmchart.Dependency{ Name: chartName, Version: "0.2.0", @@ -316,7 +319,8 @@ func TestDependencyManager_addLocalDependency(t *testing.T) { wantErr: "can't get a valid version for constraint '0.2.0'", }, { - name: "invalid local reference", + name: "invalid local reference", + chartName: "helmchartwithdeps", dep: &helmchart.Dependency{ Name: chartName, Version: chartVersion, @@ -325,7 +329,8 @@ func TestDependencyManager_addLocalDependency(t *testing.T) { wantErr: "no chart found at '/absolutely/invalid'", }, { - name: "invalid chart archive", + name: "invalid chart archive", + chartName: "helmchartwithdeps", dep: &helmchart.Dependency{ Name: chartName, Version: chartVersion, @@ -334,7 +339,8 @@ func TestDependencyManager_addLocalDependency(t *testing.T) { wantErr: "failed to load chart from '/empty.tgz'", }, { - name: "invalid constraint", + name: "invalid constraint", + chartName: "helmchartwithdeps", dep: &helmchart.Dependency{ Name: chartName, Version: "invalid", @@ -342,6 +348,26 @@ func TestDependencyManager_addLocalDependency(t *testing.T) { }, wantErr: "invalid version/constraint format 'invalid'", }, + { + name: "no repository", + chartName: "helmchartwithdepsnorepo", + dep: &helmchart.Dependency{ + Name: chartName, + Version: chartVersion, + }, + wantFunc: func(g *WithT, c *helmchart.Chart) { + g.Expect(c.Dependencies()).To(HaveLen(1)) + }, + }, + { + name: "no repository invalid reference", + chartName: "helmchartwithdepsnorepo", + dep: &helmchart.Dependency{ + Name: "nonexistingchart", + Version: chartVersion, + }, + wantErr: "no chart found at '/helmchartwithdepsnorepo/charts/nonexistingchart'", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -353,7 +379,7 @@ func TestDependencyManager_addLocalDependency(t *testing.T) { absWorkDir, err := filepath.Abs("../testdata/charts") g.Expect(err).ToNot(HaveOccurred()) - err = dm.addLocalDependency(LocalReference{WorkDir: absWorkDir, 
Path: "helmchartwithdeps"}, + err = dm.addLocalDependency(LocalReference{WorkDir: absWorkDir, Path: tt.chartName}, &chartWithLock{Chart: chart}, tt.dep) if tt.wantErr != "" { g.Expect(err).To(HaveOccurred()) @@ -844,6 +870,15 @@ func TestDependencyManager_secureLocalChartPath(t *testing.T) { }, wantErr: "not a local chart reference", }, + { + name: "local dependency with empty repository", + dep: &helmchart.Dependency{ + Name: "some-subchart", + }, + baseDir: "/tmp/workdir", + path: "/chart", + want: "/tmp/workdir/chart/charts/some-subchart", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/internal/helm/common/string_resource.go b/internal/helm/common/string_resource.go new file mode 100644 index 000000000..b4cdada9f --- /dev/null +++ b/internal/helm/common/string_resource.go @@ -0,0 +1,39 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import "strings" + +// StringResource is there to satisfy the github.com/google/go-containerregistry/pkg/authn.Resource interface. +// It merely wraps a given string and returns it for all of the interface's methods. +type StringResource struct { + Registry string +} + +// String returns a string representation of the StringResource. +// It converts the StringResource object to a string. +// The returned string contains the value of the StringResource. 
+func (r StringResource) String() string { + return r.Registry +} + +// RegistryStr returns the string representation of the registry resource. +// It converts the StringResource object to a string that represents the registry resource. +// The returned string can be used to interact with the registry resource. +func (r StringResource) RegistryStr() string { + return strings.Split(r.Registry, "/")[0] +} diff --git a/internal/helm/getter/client_opts.go b/internal/helm/getter/client_opts.go index 4dfc97b40..e40811b39 100644 --- a/internal/helm/getter/client_opts.go +++ b/internal/helm/getter/client_opts.go @@ -24,7 +24,6 @@ import ( "os" "path" - "github.com/fluxcd/pkg/oci" "github.com/google/go-containerregistry/pkg/authn" helmgetter "helm.sh/helm/v3/pkg/getter" helmreg "helm.sh/helm/v3/pkg/registry" @@ -32,10 +31,11 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - helmv1 "github.com/fluxcd/source-controller/api/v1beta2" + "github.com/fluxcd/pkg/runtime/secrets" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" "github.com/fluxcd/source-controller/internal/helm/registry" soci "github.com/fluxcd/source-controller/internal/oci" - stls "github.com/fluxcd/source-controller/internal/tls" ) const ( @@ -54,6 +54,7 @@ type ClientOpts struct { RegLoginOpts []helmreg.LoginOption TlsConfig *tls.Config GetterOpts []helmgetter.Option + Insecure bool } // MustLoginToRegistry returns true if the client options contain at least @@ -68,111 +69,164 @@ func (o ClientOpts) MustLoginToRegistry() bool { // auth mechanisms. // A temporary directory is created to store the certs files if needed and its path is returned along with the options object. It is the // caller's responsibility to clean up the directory. 
-func GetClientOpts(ctx context.Context, c client.Client, obj *helmv1.HelmRepository, url string) (*ClientOpts, string, error) { - hrOpts := &ClientOpts{ +func GetClientOpts(ctx context.Context, c client.Client, obj *sourcev1.HelmRepository, url string) (*ClientOpts, string, error) { + // This function configures authentication for Helm repositories based on the provided secrets: + // - CertSecretRef: TLS client certificates (always takes priority) + // - SecretRef: Can contain Basic Auth or TLS certificates (deprecated) + // For OCI repositories, additional registry-specific authentication is configured (including Docker config) + opts := &ClientOpts{ GetterOpts: []helmgetter.Option{ helmgetter.WithURL(url), helmgetter.WithTimeout(obj.GetTimeout()), helmgetter.WithPassCredentialsAll(obj.Spec.PassCredentials), }, + Insecure: obj.Spec.Insecure, } - ociRepo := obj.Spec.Type == helmv1.HelmRepositoryTypeOCI - var ( - certSecret *corev1.Secret - tlsBytes *stls.TLSBytes - certFile string - keyFile string - caFile string - dir string - err error - ) - // Check `.spec.certSecretRef` first for any TLS auth data. + // Process secrets and configure authentication + deprecatedTLS, certSecret, authSecret, err := configureAuthentication(ctx, c, obj, opts, url) + if err != nil { + return nil, "", err + } + + // Setup OCI registry specific configurations if needed + var tempCertDir string + if obj.Spec.Type == sourcev1.HelmRepositoryTypeOCI { + tempCertDir, err = configureOCIRegistryWithSecrets(ctx, obj, opts, url, certSecret, authSecret) + if err != nil { + return nil, "", err + } + } + + var deprecatedErr error + if deprecatedTLS { + deprecatedErr = ErrDeprecatedTLSConfig + } + + return opts, tempCertDir, deprecatedErr +} + +// configureAuthentication processes all secret references and sets up authentication. 
+// Returns (deprecatedTLS, certSecret, authSecret, error) where: +// - deprecatedTLS: true if TLS config comes from SecretRef (deprecated pattern) +// - certSecret: the secret from CertSecretRef (nil if not specified) +// - authSecret: the secret from SecretRef (nil if not specified) +func configureAuthentication(ctx context.Context, c client.Client, obj *sourcev1.HelmRepository, opts *ClientOpts, url string) (bool, *corev1.Secret, *corev1.Secret, error) { + var deprecatedTLS bool + var certSecret, authSecret *corev1.Secret + if obj.Spec.CertSecretRef != nil { - certSecret, err = fetchSecret(ctx, c, obj.Spec.CertSecretRef.Name, obj.GetNamespace()) + secret, err := fetchSecret(ctx, c, obj.Spec.CertSecretRef.Name, obj.GetNamespace()) if err != nil { - return nil, "", fmt.Errorf("failed to get TLS authentication secret '%s/%s': %w", obj.GetNamespace(), obj.Spec.CertSecretRef.Name, err) + secretRef := types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.Spec.CertSecretRef.Name} + return false, nil, nil, fmt.Errorf("failed to get TLS authentication secret '%s': %w", secretRef, err) } + certSecret = secret - hrOpts.TlsConfig, tlsBytes, err = stls.KubeTLSClientConfigFromSecret(*certSecret, url) + // NOTE: Use WithSystemCertPool to maintain backward compatibility with the existing + // extend approach (system CAs + user CA) rather than the default replace approach (user CA only). + // This ensures HelmRepository continues to work with both system and user-provided CA certificates. + var tlsOpts = []secrets.TLSConfigOption{secrets.WithSystemCertPool()} + tlsConfig, err := secrets.TLSConfigFromSecret(ctx, secret, tlsOpts...) 
if err != nil { - return nil, "", fmt.Errorf("failed to construct Helm client's TLS config: %w", err) + return false, nil, nil, fmt.Errorf("failed to construct Helm client's TLS config: %w", err) } + opts.TlsConfig = tlsConfig } - var authSecret *corev1.Secret - var deprecatedTLSConfig bool + // Extract all authentication methods from SecretRef. + // This secret may contain multiple auth types (Basic Auth, TLS). if obj.Spec.SecretRef != nil { - authSecret, err = fetchSecret(ctx, c, obj.Spec.SecretRef.Name, obj.GetNamespace()) + secret, err := fetchSecret(ctx, c, obj.Spec.SecretRef.Name, obj.GetNamespace()) if err != nil { - return nil, "", fmt.Errorf("failed to get authentication secret '%s/%s': %w", obj.GetNamespace(), obj.Spec.SecretRef.Name, err) + secretRef := types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.Spec.SecretRef.Name} + return false, nil, nil, fmt.Errorf("failed to get authentication secret '%s': %w", secretRef, err) } + authSecret = secret - // Construct actual Helm client options. - opts, err := GetterOptionsFromSecret(*authSecret) + // NOTE: Use WithTLSSystemCertPool to maintain backward compatibility with the existing + // extend approach (system CAs + user CA) rather than the default replace approach (user CA only). + // This ensures HelmRepository auth methods work with both system and user-provided CA certificates. + var authOpts = []secrets.AuthMethodsOption{ + secrets.WithTLSSystemCertPool(), + } + methods, err := secrets.AuthMethodsFromSecret(ctx, secret, authOpts...) if err != nil { - return nil, "", fmt.Errorf("failed to configure Helm client: %w", err) + return false, nil, nil, fmt.Errorf("failed to detect authentication methods: %w", err) } - hrOpts.GetterOpts = append(hrOpts.GetterOpts, opts...) - // If the TLS config is nil, i.e. one couldn't be constructed using - // `.spec.certSecretRef`, then try to use `.spec.secretRef`. 
- if hrOpts.TlsConfig == nil && !ociRepo { - hrOpts.TlsConfig, tlsBytes, err = stls.LegacyTLSClientConfigFromSecret(*authSecret, url) - if err != nil { - return nil, "", fmt.Errorf("failed to construct Helm client's TLS config: %w", err) - } - // Constructing a TLS config using the auth secret is deprecated behavior. - if hrOpts.TlsConfig != nil { - deprecatedTLSConfig = true - } + if methods.HasBasicAuth() { + opts.GetterOpts = append(opts.GetterOpts, + helmgetter.WithBasicAuth(methods.Basic.Username, methods.Basic.Password)) } - if ociRepo { - hrOpts.Keychain, err = registry.LoginOptionFromSecret(url, *authSecret) - if err != nil { - return nil, "", fmt.Errorf("failed to configure login options: %w", err) - } + // Use TLS from SecretRef only if CertSecretRef is not specified (CertSecretRef takes priority) + if opts.TlsConfig == nil && methods.HasTLS() { + opts.TlsConfig = methods.TLS + deprecatedTLS = true } - } else if obj.Spec.Provider != helmv1.GenericOCIProvider && obj.Spec.Type == helmv1.HelmRepositoryTypeOCI && ociRepo { - authenticator, authErr := soci.OIDCAuth(ctx, obj.Spec.URL, obj.Spec.Provider) - if authErr != nil && !errors.Is(authErr, oci.ErrUnconfiguredProvider) { - return nil, "", fmt.Errorf("failed to get credential from '%s': %w", obj.Spec.Provider, authErr) + } + + return deprecatedTLS, certSecret, authSecret, nil +} + +// configureOCIRegistryWithSecrets sets up OCI-specific configurations using pre-fetched secrets +func configureOCIRegistryWithSecrets(ctx context.Context, obj *sourcev1.HelmRepository, opts *ClientOpts, url string, certSecret, authSecret *corev1.Secret) (string, error) { + // Configure OCI authentication from authSecret if available + if authSecret != nil { + keychain, err := registry.LoginOptionFromSecret(url, *authSecret) + if err != nil { + return "", fmt.Errorf("failed to configure login options: %w", err) } - if authenticator != nil { - hrOpts.Authenticator = authenticator + opts.Keychain = keychain + } + + // Handle OCI 
provider authentication if no SecretRef + if obj.Spec.SecretRef == nil && obj.Spec.Provider != "" && obj.Spec.Provider != sourcev1.GenericOCIProvider { + authenticator, err := soci.OIDCAuth(ctx, url, obj.Spec.Provider) + if err != nil { + return "", fmt.Errorf("failed to get credential from '%s': %w", obj.Spec.Provider, err) } + opts.Authenticator = authenticator } - if ociRepo { - // Persist the certs files to the path if needed. - if tlsBytes != nil { - dir, err = os.MkdirTemp("", "helm-repo-oci-certs") - if err != nil { - return nil, "", fmt.Errorf("cannot create temporary directory: %w", err) - } - certFile, keyFile, caFile, err = storeTLSCertificateFiles(tlsBytes, dir) - if err != nil { - return nil, "", fmt.Errorf("cannot write certs files to path: %w", err) - } + // Setup registry login options + loginOpt, err := registry.NewLoginOption(opts.Authenticator, opts.Keychain, url) + if err != nil { + return "", err + } + + if loginOpt != nil { + opts.RegLoginOpts = []helmreg.LoginOption{loginOpt, helmreg.LoginOptInsecure(obj.Spec.Insecure)} + } + + // Handle TLS certificate files for OCI + var tempCertDir string + if opts.TlsConfig != nil { + tempCertDir, err = os.MkdirTemp("", "helm-repo-oci-certs") + if err != nil { + return "", fmt.Errorf("cannot create temporary directory: %w", err) + } + + var tlsSecret *corev1.Secret + if certSecret != nil { + tlsSecret = certSecret + } else if authSecret != nil { + tlsSecret = authSecret } - loginOpt, err := registry.NewLoginOption(hrOpts.Authenticator, hrOpts.Keychain, url) + + certFile, keyFile, caFile, err := storeTLSCertificateFilesForOCI(ctx, tlsSecret, nil, tempCertDir) if err != nil { - return nil, "", err + return "", fmt.Errorf("cannot write certs files to path: %w", err) } - if loginOpt != nil { - hrOpts.RegLoginOpts = []helmreg.LoginOption{loginOpt} - tlsLoginOpt := registry.TLSLoginOption(certFile, keyFile, caFile) - if tlsLoginOpt != nil { - hrOpts.RegLoginOpts = append(hrOpts.RegLoginOpts, tlsLoginOpt) - } + 
+ tlsLoginOpt := registry.TLSLoginOption(certFile, keyFile, caFile) + if tlsLoginOpt != nil { + opts.RegLoginOpts = append(opts.RegLoginOpts, tlsLoginOpt) } } - if deprecatedTLSConfig { - err = ErrDeprecatedTLSConfig - } - return hrOpts, dir, err + return tempCertDir, nil } func fetchSecret(ctx context.Context, c client.Client, name, namespace string) (*corev1.Secret, error) { @@ -187,30 +241,48 @@ func fetchSecret(ctx context.Context, c client.Client, name, namespace string) ( return &secret, nil } -// storeTLSCertificateFiles writes the certs files to the given path and returns the files paths. -func storeTLSCertificateFiles(tlsBytes *stls.TLSBytes, path string) (string, string, string, error) { +// storeTLSCertificateFilesForOCI writes TLS certificate data from secrets to files for OCI registry authentication. +// Helm OCI registry client requires certificate file paths rather than in-memory data, +// so we need to temporarily write the certificate data to disk. +// Returns paths to the written cert, key, and CA files (any of which may be empty if not present). 
+func storeTLSCertificateFilesForOCI(ctx context.Context, certSecret, authSecret *corev1.Secret, path string) (string, string, string, error) { var ( certFile string keyFile string caFile string err error ) - if len(tlsBytes.CertBytes) > 0 && len(tlsBytes.KeyBytes) > 0 { - certFile, err = writeToFile(tlsBytes.CertBytes, certFileName, path) - if err != nil { - return "", "", "", err - } - keyFile, err = writeToFile(tlsBytes.KeyBytes, keyFileName, path) - if err != nil { - return "", "", "", err - } + + // Try to get TLS data from certSecret first, then authSecret + var tlsSecret *corev1.Secret + if certSecret != nil { + tlsSecret = certSecret + } else if authSecret != nil { + tlsSecret = authSecret } - if len(tlsBytes.CABytes) > 0 { - caFile, err = writeToFile(tlsBytes.CABytes, caFileName, path) - if err != nil { - return "", "", "", err + + if tlsSecret != nil { + if certData, exists := tlsSecret.Data[secrets.KeyTLSCert]; exists { + if keyData, keyExists := tlsSecret.Data[secrets.KeyTLSPrivateKey]; keyExists { + certFile, err = writeToFile(certData, certFileName, path) + if err != nil { + return "", "", "", err + } + keyFile, err = writeToFile(keyData, keyFileName, path) + if err != nil { + return "", "", "", err + } + } + } + + if caData, exists := tlsSecret.Data[secrets.KeyCACert]; exists { + caFile, err = writeToFile(caData, caFileName, path) + if err != nil { + return "", "", "", err + } } } + return certFile, keyFile, caFile, nil } diff --git a/internal/helm/getter/client_opts_test.go b/internal/helm/getter/client_opts_test.go index 91bcd32f8..bf40e7f86 100644 --- a/internal/helm/getter/client_opts_test.go +++ b/internal/helm/getter/client_opts_test.go @@ -19,6 +19,7 @@ package getter import ( "context" "os" + "strings" "testing" "time" @@ -29,7 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" - helmv1 "github.com/fluxcd/source-controller/api/v1beta2" + helmv1 
"github.com/fluxcd/source-controller/api/v1" ) func TestGetClientOpts(t *testing.T) { @@ -44,6 +45,7 @@ func TestGetClientOpts(t *testing.T) { authSecret *corev1.Secret afterFunc func(t *WithT, hcOpts *ClientOpts) oci bool + insecure bool err error }{ { @@ -63,7 +65,6 @@ func TestGetClientOpts(t *testing.T) { Data: map[string][]byte{ "username": []byte("user"), "password": []byte("pass"), - "caFile": []byte("invalid"), }, }, afterFunc: func(t *WithT, hcOpts *ClientOpts) { @@ -109,9 +110,27 @@ func TestGetClientOpts(t *testing.T) { t.Expect(err).ToNot(HaveOccurred()) t.Expect(config.Username).To(Equal("user")) t.Expect(config.Password).To(Equal("pass")) + t.Expect(hcOpts.Insecure).To(BeFalse()) }, oci: true, }, + { + name: "OCI HelmRepository with insecure repository", + authSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-oci", + }, + Data: map[string][]byte{ + "username": []byte("user"), + "password": []byte("pass"), + }, + }, + afterFunc: func(t *WithT, hcOpts *ClientOpts) { + t.Expect(hcOpts.Insecure).To(BeTrue()) + }, + oci: true, + insecure: true, + }, } for _, tt := range tests { @@ -123,6 +142,7 @@ func TestGetClientOpts(t *testing.T) { Timeout: &metav1.Duration{ Duration: time.Second, }, + Insecure: tt.insecure, }, } if tt.oci { @@ -166,6 +186,7 @@ func TestGetClientOpts_registryTLSLoginOption(t *testing.T) { certSecret *corev1.Secret authSecret *corev1.Secret loginOptsN int + wantErrMsg string }{ { name: "with valid caFile", @@ -186,7 +207,7 @@ func TestGetClientOpts_registryTLSLoginOption(t *testing.T) { "password": []byte("pass"), }, }, - loginOptsN: 2, + loginOptsN: 3, }, { name: "without caFile", @@ -205,7 +226,7 @@ func TestGetClientOpts_registryTLSLoginOption(t *testing.T) { "password": []byte("pass"), }, }, - loginOptsN: 1, + wantErrMsg: "must contain either 'ca.crt' or both 'tls.crt' and 'tls.key'", }, { name: "without cert secret", @@ -219,7 +240,7 @@ func TestGetClientOpts_registryTLSLoginOption(t *testing.T) { 
"password": []byte("pass"), }, }, - loginOptsN: 1, + loginOptsN: 2, }, } for _, tt := range tests { @@ -251,6 +272,17 @@ func TestGetClientOpts_registryTLSLoginOption(t *testing.T) { c := clientBuilder.Build() clientOpts, tmpDir, err := GetClientOpts(context.TODO(), c, helmRepo, "https://ghcr.io/dummy") + if tt.wantErrMsg != "" { + if err == nil { + t.Errorf("GetClientOpts() expected error but got none") + return + } + if !strings.Contains(err.Error(), tt.wantErrMsg) { + t.Errorf("GetClientOpts() expected error containing %q but got %v", tt.wantErrMsg, err) + return + } + return + } if err != nil { t.Errorf("GetClientOpts() error = %v", err) return @@ -260,7 +292,7 @@ func TestGetClientOpts_registryTLSLoginOption(t *testing.T) { } if tt.loginOptsN != len(clientOpts.RegLoginOpts) { // we should have a login option but no TLS option - t.Error("registryTLSLoginOption() != nil") + t.Errorf("expected length of %d for clientOpts.RegLoginOpts but got %d", tt.loginOptsN, len(clientOpts.RegLoginOpts)) return } }) diff --git a/internal/helm/getter/getter.go b/internal/helm/getter/getter.go deleted file mode 100644 index 18661da16..000000000 --- a/internal/helm/getter/getter.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package getter - -import ( - "fmt" - - "helm.sh/helm/v3/pkg/getter" - corev1 "k8s.io/api/core/v1" -) - -// GetterOptionsFromSecret constructs a getter.Option slice for the given secret. 
-// It returns the slice, or an error. -func GetterOptionsFromSecret(secret corev1.Secret) ([]getter.Option, error) { - var opts []getter.Option - basicAuth, err := basicAuthFromSecret(secret) - if err != nil { - return opts, err - } - if basicAuth != nil { - opts = append(opts, basicAuth) - } - return opts, nil -} - -// basicAuthFromSecret attempts to construct a basic auth getter.Option for the -// given v1.Secret and returns the result. -// -// Secrets with no username AND password are ignored, if only one is defined it -// returns an error. -func basicAuthFromSecret(secret corev1.Secret) (getter.Option, error) { - username, password := string(secret.Data["username"]), string(secret.Data["password"]) - switch { - case username == "" && password == "": - return nil, nil - case username == "" || password == "": - return nil, fmt.Errorf("invalid '%s' secret data: required fields 'username' and 'password'", secret.Name) - } - return getter.WithBasicAuth(username, password), nil -} diff --git a/internal/helm/getter/getter_test.go b/internal/helm/getter/getter_test.go deleted file mode 100644 index cffe0064f..000000000 --- a/internal/helm/getter/getter_test.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package getter - -import ( - "testing" - - corev1 "k8s.io/api/core/v1" -) - -var ( - basicAuthSecretFixture = corev1.Secret{ - Data: map[string][]byte{ - "username": []byte("user"), - "password": []byte("password"), - }, - } -) - -func TestGetterOptionsFromSecret(t *testing.T) { - tests := []struct { - name string - secrets []corev1.Secret - }{ - {"basic auth", []corev1.Secret{basicAuthSecretFixture}}, - {"empty", []corev1.Secret{}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - secret := corev1.Secret{Data: map[string][]byte{}} - for _, s := range tt.secrets { - for k, v := range s.Data { - secret.Data[k] = v - } - } - - got, err := GetterOptionsFromSecret(secret) - if err != nil { - t.Errorf("ClientOptionsFromSecret() error = %v", err) - return - } - if len(got) != len(tt.secrets) { - t.Errorf("ClientOptionsFromSecret() options = %v, expected = %v", got, len(tt.secrets)) - } - }) - } -} - -func Test_basicAuthFromSecret(t *testing.T) { - tests := []struct { - name string - secret corev1.Secret - modify func(secret *corev1.Secret) - wantErr bool - wantNil bool - }{ - {"username and password", basicAuthSecretFixture, nil, false, false}, - {"without username", basicAuthSecretFixture, func(s *corev1.Secret) { delete(s.Data, "username") }, true, true}, - {"without password", basicAuthSecretFixture, func(s *corev1.Secret) { delete(s.Data, "password") }, true, true}, - {"empty", corev1.Secret{}, nil, false, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - secret := tt.secret.DeepCopy() - if tt.modify != nil { - tt.modify(secret) - } - got, err := basicAuthFromSecret(*secret) - if (err != nil) != tt.wantErr { - t.Errorf("BasicAuthFromSecret() error = %v, wantErr %v", err, tt.wantErr) - return - } - if tt.wantNil && got != nil { - t.Error("BasicAuthFromSecret() != nil") - return - } - }) - } -} diff --git a/internal/helm/registry/auth.go b/internal/helm/registry/auth.go index 1b9b3332f..c8b3ca6ae 100644 --- 
a/internal/helm/registry/auth.go +++ b/internal/helm/registry/auth.go @@ -23,6 +23,7 @@ import ( "github.com/docker/cli/cli/config" "github.com/docker/cli/cli/config/credentials" + "github.com/fluxcd/source-controller/internal/helm/common" "github.com/fluxcd/source-controller/internal/oci" "github.com/google/go-containerregistry/pkg/authn" "helm.sh/helm/v3/pkg/registry" @@ -95,7 +96,7 @@ func KeychainAdaptHelper(keyChain authn.Keychain) func(string) (registry.LoginOp if err != nil { return nil, fmt.Errorf("unable to parse registry URL '%s'", registryURL) } - authenticator, err := keyChain.Resolve(stringResource{parsedURL.Host}) + authenticator, err := keyChain.Resolve(common.StringResource{Registry: parsedURL.Host}) if err != nil { return nil, fmt.Errorf("unable to resolve credentials for registry '%s': %w", registryURL, err) } @@ -126,20 +127,6 @@ func AuthAdaptHelper(auth authn.Authenticator) (registry.LoginOption, error) { return registry.LoginOptBasicAuth(username, password), nil } -// stringResource is there to satisfy the github.com/google/go-containerregistry/pkg/authn.Resource interface. -// It merely wraps a given string and returns it for all of the interface's methods. -type stringResource struct { - registry string -} - -func (r stringResource) String() string { - return r.registry -} - -func (r stringResource) RegistryStr() string { - return r.registry -} - // NewLoginOption returns a registry login option for the given HelmRepository. // If the HelmRepository does not specify a secretRef, a nil login option is returned. 
func NewLoginOption(auth authn.Authenticator, keychain authn.Keychain, registryURL string) (registry.LoginOption, error) { diff --git a/internal/helm/registry/client.go b/internal/helm/registry/client.go index 8f2b315c2..5b89ea12e 100644 --- a/internal/helm/registry/client.go +++ b/internal/helm/registry/client.go @@ -69,10 +69,10 @@ func newClient(credentialsFile string, tlsConfig *tls.Config, insecureHTTP bool) opts = append(opts, registry.ClientOptPlainHTTP()) } if tlsConfig != nil { + t := http.DefaultTransport.(*http.Transport).Clone() + t.TLSClientConfig = tlsConfig opts = append(opts, registry.ClientOptHTTPClient(&http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsConfig, - }, + Transport: t, })) } if credentialsFile != "" { diff --git a/internal/helm/repository/chart_repository.go b/internal/helm/repository/chart_repository.go index 4908e8f36..e8030ec7b 100644 --- a/internal/helm/repository/chart_repository.go +++ b/internal/helm/repository/chart_repository.go @@ -28,6 +28,7 @@ import ( "os" "path" "sort" + "strings" "sync" "github.com/Masterminds/semver/v3" @@ -39,8 +40,9 @@ import ( "github.com/fluxcd/pkg/version" + "github.com/fluxcd/pkg/http/transport" "github.com/fluxcd/source-controller/internal/helm" - "github.com/fluxcd/source-controller/internal/transport" + "github.com/fluxcd/source-controller/internal/oci" ) var ( @@ -85,18 +87,24 @@ func IndexFromBytes(b []byte) (*repo.IndexFile, error) { return nil, repo.ErrNoAPIVersion } - for _, cvs := range i.Entries { + for name, cvs := range i.Entries { for idx := len(cvs) - 1; idx >= 0; idx-- { if cvs[idx] == nil { continue } + // When metadata section missing, initialize with no data + if cvs[idx].Metadata == nil { + cvs[idx].Metadata = &chart.Metadata{} + } if cvs[idx].APIVersion == "" { cvs[idx].APIVersion = chart.APIVersionV1 } - if err := cvs[idx].Validate(); err != nil { + if err := cvs[idx].Validate(); 
ignoreSkippableChartValidationError(err) != nil { cvs = append(cvs[:idx], cvs[idx+1:]...) } } + // adjust slice to only contain a set of valid versions + i.Entries[name] = cvs } i.SortEntries() @@ -291,13 +299,20 @@ func (r *ChartRepository) CacheIndex() error { return fmt.Errorf("failed to create temp file to cache index to: %w", err) } - if err = r.DownloadIndex(f); err != nil { + if err = r.DownloadIndex(f, helm.MaxIndexSize); err != nil { f.Close() - os.Remove(f.Name()) + removeErr := os.Remove(f.Name()) + if removeErr != nil { + err = errors.Join(err, removeErr) + } return fmt.Errorf("failed to cache index to temporary file: %w", err) } + if err = f.Close(); err != nil { - os.Remove(f.Name()) + removeErr := os.Remove(f.Name()) + if removeErr != nil { + err = errors.Join(err, removeErr) + } return fmt.Errorf("failed to close cached index file '%s': %w", f.Name(), err) } @@ -354,8 +369,10 @@ func (r *ChartRepository) LoadFromPath() error { // DownloadIndex attempts to download the chart repository index using // the Client and set Options, and writes the index to the given io.Writer. -// It returns an url.Error if the URL failed to parse. -func (r *ChartRepository) DownloadIndex(w io.Writer) (err error) { +// Upon download, the index is copied to the writer if the index size +// does not exceed the maximum index file size. Otherwise, it returns an error. +// A url.Error is returned if the URL failed to parse. +func (r *ChartRepository) DownloadIndex(w io.Writer, maxSize int64) (err error) { r.RLock() defer r.RUnlock() @@ -375,6 +392,11 @@ func (r *ChartRepository) DownloadIndex(w io.Writer) (err error) { if err != nil { return err } + + if int64(res.Len()) > maxSize { + return fmt.Errorf("index exceeds the maximum index file size of %d bytes", maxSize) + } + if _, err = io.Copy(w, res); err != nil { return err } @@ -465,9 +487,9 @@ func (r *ChartRepository) invalidate() { // VerifyChart verifies the chart against a signature. // It returns an error on failure. 
-func (r *ChartRepository) VerifyChart(_ context.Context, _ *repo.ChartVersion) error { +func (r *ChartRepository) VerifyChart(_ context.Context, _ *repo.ChartVersion) (oci.VerificationResult, error) { // this is a no-op because this is not implemented yet. - return fmt.Errorf("not implemented") + return oci.VerificationResultIgnored, fmt.Errorf("not implemented") } // jsonOrYamlUnmarshal unmarshals the given byte slice containing JSON or YAML @@ -486,3 +508,25 @@ func jsonOrYamlUnmarshal(b []byte, i interface{}) error { } return yaml.UnmarshalStrict(b, i) } + +// ignoreSkippableChartValidationError inspects the given error and returns nil if +// the error isn't important for index loading. +// +// In particular, charts may introduce validations that don't impact repository indexes. +// And repository indexes may be generated by older/non-compliant software, which doesn't +// conform to all validations. +// +// This code is taken from https://github.com/helm/helm/blob/v3.15.2/pkg/repo/index.go#L402 +func ignoreSkippableChartValidationError(err error) error { + verr, ok := err.(chart.ValidationError) + if !ok { + return err + } + + // https://github.com/helm/helm/issues/12748 (JFrog repository strips alias field from index) + if strings.HasPrefix(verr.Error(), "validation: more than one dependency with name or alias") { + return nil + } + + return err +} diff --git a/internal/helm/repository/chart_repository_test.go b/internal/helm/repository/chart_repository_test.go index d67fe0eac..1b2f1c0fb 100644 --- a/internal/helm/repository/chart_repository_test.go +++ b/internal/helm/repository/chart_repository_test.go @@ -444,11 +444,19 @@ func TestChartRepository_DownloadIndex(t *testing.T) { RWMutex: &sync.RWMutex{}, } - buf := bytes.NewBuffer([]byte{}) - g.Expect(r.DownloadIndex(buf)).To(Succeed()) - g.Expect(buf.Bytes()).To(Equal(b)) - g.Expect(mg.LastCalledURL).To(Equal(r.URL + "/index.yaml")) - g.Expect(err).To(BeNil()) + t.Run("download 
index", func(t *testing.T) { + buf := bytes.NewBuffer([]byte{}) + g.Expect(r.DownloadIndex(buf, helm.MaxIndexSize)).To(Succeed()) + g.Expect(buf.Bytes()).To(Equal(b)) + g.Expect(mg.LastCalledURL).To(Equal(r.URL + "/index.yaml")) + g.Expect(err).To(BeNil()) + }) + + t.Run("download index size error", func(t *testing.T) { + buf := bytes.NewBuffer([]byte{}) + g.Expect(r.DownloadIndex(buf, int64(len(b)-1))).To(HaveOccurred()) + g.Expect(mg.LastCalledURL).To(Equal(r.URL + "/index.yaml")) + }) } func TestChartRepository_StrategicallyLoadIndex(t *testing.T) { @@ -664,7 +672,7 @@ func verifyLocalIndex(t *testing.T, i *repo.IndexFile) { g := NewWithT(t) g.Expect(i.Entries).ToNot(BeNil()) - g.Expect(i.Entries).To(HaveLen(3), "expected 3 entries in index file") + g.Expect(i.Entries).To(HaveLen(4), "expected 4 entries in index file") alpine, ok := i.Entries["alpine"] g.Expect(ok).To(BeTrue(), "expected 'alpine' entry to exist") @@ -674,6 +682,10 @@ func verifyLocalIndex(t *testing.T, i *repo.IndexFile) { g.Expect(ok).To(BeTrue(), "expected 'nginx' entry to exist") g.Expect(nginx).To(HaveLen(2), "'nginx' should have 2 entries") + broken, ok := i.Entries["xChartWithDuplicateDependenciesAndMissingAlias"] + g.Expect(ok).To(BeTrue(), "expected 'xChartWithDuplicateDependenciesAndMissingAlias' entry to exist") + g.Expect(broken).To(HaveLen(1), "'xChartWithDuplicateDependenciesAndMissingAlias' should have 1 entries") + expects := []*repo.ChartVersion{ { Metadata: &chart.Metadata{ @@ -715,8 +727,24 @@ func verifyLocalIndex(t *testing.T, i *repo.IndexFile) { }, Digest: "sha256:1234567890abcdef", }, + { + Metadata: &chart.Metadata{ + Name: "xChartWithDuplicateDependenciesAndMissingAlias", + Description: "string", + Version: "1.2.3", + Keywords: []string{"broken", "still accepted"}, + Home: "https://example.com/something", + Dependencies: []*chart.Dependency{ + {Name: "kube-rbac-proxy", Version: "0.9.1"}, + }, + }, + URLs: []string{ + 
"https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz", + }, + Digest: "sha256:1234567890abcdef", + }, } - tests := []*repo.ChartVersion{alpine[0], nginx[0], nginx[1]} + tests := []*repo.ChartVersion{alpine[0], nginx[0], nginx[1], broken[0]} for i, tt := range tests { expect := expects[i] @@ -727,5 +755,129 @@ func verifyLocalIndex(t *testing.T, i *repo.IndexFile) { g.Expect(tt.Home).To(Equal(expect.Home)) g.Expect(tt.URLs).To(ContainElements(expect.URLs)) g.Expect(tt.Keywords).To(ContainElements(expect.Keywords)) + g.Expect(tt.Dependencies).To(ContainElements(expect.Dependencies)) + } +} + +// This code is taken from https://github.com/helm/helm/blob/v3.15.2/pkg/repo/index_test.go#L601 +// and refers to: https://github.com/helm/helm/issues/12748 +func TestIgnoreSkippableChartValidationError(t *testing.T) { + type TestCase struct { + Input error + ErrorSkipped bool + } + testCases := map[string]TestCase{ + "nil": { + Input: nil, + }, + "generic_error": { + Input: fmt.Errorf("foo"), + }, + "non_skipped_validation_error": { + Input: chart.ValidationError("chart.metadata.type must be application or library"), + }, + "skipped_validation_error": { + Input: chart.ValidationErrorf("more than one dependency with name or alias %q", "foo"), + ErrorSkipped: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result := ignoreSkippableChartValidationError(tc.Input) + + if tc.Input == nil { + if result != nil { + t.Error("expected nil result for nil input") + } + return + } + + if tc.ErrorSkipped { + if result != nil { + t.Error("expected nil result for skipped error") + } + return + } + + if tc.Input != result { + t.Error("expected the result equal to input") + } + + }) + } +} + +var indexWithFirstVersionInvalid = ` +apiVersion: v1 +entries: + nginx: + - urls: + - https://charts.helm.sh/stable/alpine-1.0.0.tgz + - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz + name: nginx + 
version: 0..1.0 + description: string + home: https://github.com/something + digest: "sha256:1234567890abcdef" + - urls: + - https://charts.helm.sh/stable/nginx-0.2.0.tgz + name: nginx + description: string + version: 0.2.0 + home: https://github.com/something/else + digest: "sha256:1234567890abcdef" +` +var indexWithLastVersionInvalid = ` +apiVersion: v1 +entries: + nginx: + - urls: + - https://charts.helm.sh/stable/nginx-0.2.0.tgz + name: nginx + description: string + version: 0.2.0 + home: https://github.com/something/else + digest: "sha256:1234567890abcdef" + - urls: + - https://charts.helm.sh/stable/alpine-1.0.0.tgz + - http://storage2.googleapis.com/kubernetes-charts/alpine-1.0.0.tgz + name: nginx + version: 0..1.0 + description: string + home: https://github.com/something + digest: "sha256:1234567890abcdef" +` + +func TestIndexFromBytes_InvalidEntries(t *testing.T) { + tests := []struct { + source string + data string + }{ + { + source: "indexWithFirstVersionInvalid", + data: indexWithFirstVersionInvalid, + }, + { + source: "indexWithLastVersionInvalid", + data: indexWithLastVersionInvalid, + }, + } + for _, tc := range tests { + t.Run(tc.source, func(t *testing.T) { + idx, err := IndexFromBytes([]byte(tc.data)) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + cvs := idx.Entries["nginx"] + if len(cvs) == 0 { + t.Error("expected one chart version not to be filtered out") + } + for _, v := range cvs { + if v.Version == "0..1.0" { + t.Error("malformed version was not filtered out") + } + } + }) } } diff --git a/internal/helm/repository/oci_chart_repository.go b/internal/helm/repository/oci_chart_repository.go index 89798b5dc..2bed964a2 100644 --- a/internal/helm/repository/oci_chart_repository.go +++ b/internal/helm/repository/oci_chart_repository.go @@ -36,9 +36,9 @@ import ( "github.com/Masterminds/semver/v3" "github.com/google/go-containerregistry/pkg/name" + 
"github.com/fluxcd/pkg/http/transport" "github.com/fluxcd/pkg/version" "github.com/fluxcd/source-controller/internal/oci" - "github.com/fluxcd/source-controller/internal/transport" ) // RegistryClient is an interface for interacting with OCI registries @@ -357,15 +357,16 @@ func getLastMatchingVersionOrConstraint(cvs []string, ver string) (string, error } // VerifyChart verifies the chart against a signature. -// If no signature is provided, a keyless verification is performed. -// It returns an error on failure. -func (r *OCIChartRepository) VerifyChart(ctx context.Context, chart *repo.ChartVersion) error { +// Supports signature verification using either cosign or notation providers. +// If no signature is provided, when cosign is used, a keyless verification is performed. +// The verification result is returned as a VerificationResult and any error encountered. +func (r *OCIChartRepository) VerifyChart(ctx context.Context, chart *repo.ChartVersion) (oci.VerificationResult, error) { if len(r.verifiers) == 0 { - return fmt.Errorf("no verifiers available") + return oci.VerificationResultFailed, fmt.Errorf("no verifiers available") } if len(chart.URLs) == 0 { - return fmt.Errorf("chart '%s' has no downloadable URLs", chart.Name) + return oci.VerificationResultFailed, fmt.Errorf("chart '%s' has no downloadable URLs", chart.Name) } var nameOpts []name.Option @@ -375,17 +376,26 @@ func (r *OCIChartRepository) VerifyChart(ctx context.Context, chart *repo.ChartV ref, err := name.ParseReference(strings.TrimPrefix(chart.URLs[0], fmt.Sprintf("%s://", registry.OCIScheme)), nameOpts...) 
if err != nil { - return fmt.Errorf("invalid chart reference: %s", err) + return oci.VerificationResultFailed, fmt.Errorf("invalid chart reference: %s", err) } + verificationResult := oci.VerificationResultFailed + // verify the chart for _, verifier := range r.verifiers { - if verified, err := verifier.Verify(ctx, ref); err != nil { - return fmt.Errorf("failed to verify %s: %w", chart.URLs[0], err) - } else if verified { - return nil + result, err := verifier.Verify(ctx, ref) + if err != nil { + return result, fmt.Errorf("failed to verify %s: %w", chart.URLs[0], err) } + if result == oci.VerificationResultSuccess { + return result, nil + } + verificationResult = result + } + + if verificationResult == oci.VerificationResultIgnored { + return verificationResult, nil } - return fmt.Errorf("no matching signatures were found for '%s'", ref.Name()) + return oci.VerificationResultFailed, fmt.Errorf("no matching signatures were found for '%s'", ref.Name()) } diff --git a/internal/helm/repository/repository.go b/internal/helm/repository/repository.go index 5fdf62bfa..6cee5f658 100644 --- a/internal/helm/repository/repository.go +++ b/internal/helm/repository/repository.go @@ -21,6 +21,8 @@ import ( "context" "helm.sh/helm/v3/pkg/repo" + + "github.com/fluxcd/source-controller/internal/oci" ) // Downloader is used to download a chart from a remote Helm repository or OCI Helm repository. @@ -31,7 +33,7 @@ type Downloader interface { // DownloadChart downloads a chart from the remote Helm repository or OCI Helm repository. DownloadChart(chart *repo.ChartVersion) (*bytes.Buffer, error) // VerifyChart verifies the chart against a signature. - VerifyChart(ctx context.Context, chart *repo.ChartVersion) error + VerifyChart(ctx context.Context, chart *repo.ChartVersion) (oci.VerificationResult, error) // Clear removes all temporary files created by the downloader, caching the files if the cache is configured, // and calling garbage collector to remove unused files. 
Clear() error diff --git a/internal/helm/testdata/chartmuseum-index.json b/internal/helm/testdata/chartmuseum-index.json index 745617e30..15ba3e704 100644 --- a/internal/helm/testdata/chartmuseum-index.json +++ b/internal/helm/testdata/chartmuseum-index.json @@ -77,6 +77,36 @@ "created": "0001-01-01T00:00:00Z", "digest": "sha256:1234567890abcdef" } + ], + "xChartWithDuplicateDependenciesAndMissingAlias": [ + { + "name": "xChartWithDuplicateDependenciesAndMissingAlias", + "home": "https://example.com/something", + "version": "1.2.3", + "description": "string", + "keywords": [ + "broken", + "still accepted" + ], + "apiVersion": "v1", + "dependencies": [ + { + "name": "kube-rbac-proxy", + "version": "0.9.1", + "repository": "" + }, + { + "name": "kube-rbac-proxy", + "version": "0.9.1", + "repository": "" + } + ], + "urls": [ + "https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz" + ], + "created": "0001-01-01T00:00:00Z", + "digest": "sha256:1234567890abcdef" + } ] } } diff --git a/internal/helm/testdata/chartmuseum-index.yaml b/internal/helm/testdata/chartmuseum-index.yaml index 3077596f4..ab00c1807 100644 --- a/internal/helm/testdata/chartmuseum-index.yaml +++ b/internal/helm/testdata/chartmuseum-index.yaml @@ -48,3 +48,19 @@ entries: - small - sumtin digest: "sha256:1234567890abcdef" + xChartWithDuplicateDependenciesAndMissingAlias: + - name: xChartWithDuplicateDependenciesAndMissingAlias + description: string + version: 1.2.3 + home: https://example.com/something + keywords: + - broken + - still accepted + urls: + - https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz + digest: "sha256:1234567890abcdef" + dependencies: + - name: kube-rbac-proxy + version: "0.9.1" + - name: kube-rbac-proxy + version: "0.9.1" diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/.helmignore b/internal/helm/testdata/charts/helmchartwithdepsnorepo/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ 
b/internal/helm/testdata/charts/helmchartwithdepsnorepo/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.lock b/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.lock new file mode 100644 index 000000000..83401ac65 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.lock @@ -0,0 +1,12 @@ +dependencies: +- name: helmchart + repository: file://../helmchart + version: 0.1.0 +- name: helmchart + repository: file://../helmchart + version: 0.1.0 +- name: grafana + repository: https://grafana.github.io/helm-charts + version: 6.17.4 +digest: sha256:1e41c97e27347f433ff0212bf52c344bc82dd435f70129d15e96cd2c8fcc32bb +generated: "2021-11-02T01:25:59.624290788+01:00" diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.yaml new file mode 100644 index 000000000..1e32b80ca --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/Chart.yaml @@ -0,0 +1,28 @@ +apiVersion: v2 +name: helmchartwithdeps +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. 
+type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. +appVersion: 1.16.0 + +dependencies: + - name: helmchart + version: "0.1.0" + - name: helmchart + alias: aliased + version: "0.1.0" diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/.helmignore b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/Chart.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/Chart.yaml new file mode 100644 index 000000000..46eaf150b --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: helmchart +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. 
They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. +appVersion: 1.16.0 diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/NOTES.txt b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/NOTES.txt new file mode 100644 index 000000000..741a77d8e --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/NOTES.txt @@ -0,0 +1,21 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "helmchart.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "helmchart.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "helmchart.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "helmchart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/_helpers.tpl b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/_helpers.tpl new file mode 100644 index 000000000..f6431fcb2 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "helmchart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "helmchart.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "helmchart.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "helmchart.labels" -}} +helm.sh/chart: {{ include "helmchart.chart" . }} +{{ include "helmchart.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "helmchart.selectorLabels" -}} +app.kubernetes.io/name: {{ include "helmchart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "helmchart.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "helmchart.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/deployment.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/deployment.yaml new file mode 100644 index 000000000..daa9f8e56 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/deployment.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "helmchart.fullname" . }} + labels: + {{- include "helmchart.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "helmchart.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "helmchart.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "helmchart.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/ingress.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/ingress.yaml new file mode 100644 index 000000000..c2069e9c8 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "helmchart.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "helmchart.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/service.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/service.yaml new file mode 100644 index 000000000..12e16ef71 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "helmchart.fullname" . }} + labels: + {{- include "helmchart.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "helmchart.selectorLabels" . | nindent 4 }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/serviceaccount.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/serviceaccount.yaml new file mode 100644 index 000000000..da3512648 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "helmchart.serviceAccountName" . }} + labels: +{{ include "helmchart.labels" . 
| nindent 4 }} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/tests/test-connection.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/tests/test-connection.yaml new file mode 100644 index 000000000..11b0b1a96 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "helmchart.fullname" . }}-test-connection" + labels: +{{ include "helmchart.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "helmchart.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values-prod.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values-prod.yaml new file mode 100644 index 000000000..5ef7832ca --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values-prod.yaml @@ -0,0 +1 @@ +replicaCount: 2 diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values.yaml new file mode 100644 index 000000000..40e7aa0b6 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/charts/helmchart/values.yaml @@ -0,0 +1,66 @@ +# Default values for helmchart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/NOTES.txt b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/NOTES.txt new file mode 100644 index 000000000..105423d28 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/NOTES.txt @@ -0,0 +1,21 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "helmchartwithdeps.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "helmchartwithdeps.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "helmchartwithdeps.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "helmchartwithdeps.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80 +{{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/_helpers.tpl b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/_helpers.tpl new file mode 100644 index 000000000..a718f8b32 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "helmchartwithdeps.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "helmchartwithdeps.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "helmchartwithdeps.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "helmchartwithdeps.labels" -}} +helm.sh/chart: {{ include "helmchartwithdeps.chart" . }} +{{ include "helmchartwithdeps.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "helmchartwithdeps.selectorLabels" -}} +app.kubernetes.io/name: {{ include "helmchartwithdeps.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "helmchartwithdeps.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "helmchartwithdeps.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/deployment.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/deployment.yaml new file mode 100644 index 000000000..08f62c740 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/deployment.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "helmchartwithdeps.fullname" . }} + labels: + {{- include "helmchartwithdeps.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "helmchartwithdeps.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "helmchartwithdeps.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "helmchartwithdeps.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/ingress.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/ingress.yaml new file mode 100644 index 000000000..6c1b03148 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "helmchartwithdeps.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "helmchartwithdeps.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + backend: + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/service.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/service.yaml new file mode 100644 index 000000000..2c270c67b --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "helmchartwithdeps.fullname" . }} + labels: + {{- include "helmchartwithdeps.labels" . 
| nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "helmchartwithdeps.selectorLabels" . | nindent 4 }} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/serviceaccount.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/serviceaccount.yaml new file mode 100644 index 000000000..2eec29c55 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "helmchartwithdeps.serviceAccountName" . }} + labels: +{{ include "helmchartwithdeps.labels" . | nindent 4 }} +{{- end -}} diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/tests/test-connection.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/tests/test-connection.yaml new file mode 100644 index 000000000..bbcd09201 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "helmchartwithdeps.fullname" . }}-test-connection" + labels: +{{ include "helmchartwithdeps.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "helmchartwithdeps.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/internal/helm/testdata/charts/helmchartwithdepsnorepo/values.yaml b/internal/helm/testdata/charts/helmchartwithdepsnorepo/values.yaml new file mode 100644 index 000000000..8213f28c1 --- /dev/null +++ b/internal/helm/testdata/charts/helmchartwithdepsnorepo/values.yaml @@ -0,0 +1,66 @@ +# Default values for helmchartwithdeps. +# This is a YAML-formatted file. 
+# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: [] + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/internal/helm/testdata/local-index-unordered.yaml b/internal/helm/testdata/local-index-unordered.yaml index 7482baaae..91ad62f1e 100644 --- a/internal/helm/testdata/local-index-unordered.yaml +++ b/internal/helm/testdata/local-index-unordered.yaml @@ -46,3 +46,19 @@ entries: - small - sumtin digest: "sha256:1234567890abcdef" + xChartWithDuplicateDependenciesAndMissingAlias: + - name: xChartWithDuplicateDependenciesAndMissingAlias + description: string + version: 1.2.3 + home: https://example.com/something + keywords: + - broken + - still accepted + urls: + - https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz + digest: "sha256:1234567890abcdef" + dependencies: + - name: kube-rbac-proxy + version: "0.9.1" + - name: kube-rbac-proxy + version: "0.9.1" diff --git a/internal/helm/testdata/local-index.yaml b/internal/helm/testdata/local-index.yaml index e680d2a3e..56c0ac2c3 100644 --- a/internal/helm/testdata/local-index.yaml +++ b/internal/helm/testdata/local-index.yaml @@ -46,3 +46,19 @@ entries: - small - sumtin digest: "sha256:1234567890abcdef" + xChartWithDuplicateDependenciesAndMissingAlias: + - name: xChartWithDuplicateDependenciesAndMissingAlias + description: string + version: 1.2.3 + home: https://example.com/something + keywords: + - broken + - still accepted + urls: + - https://kubernetes-charts.storage.googleapis.com/nginx-1.2.3.tgz + digest: "sha256:1234567890abcdef" + dependencies: + - name: kube-rbac-proxy + version: "0.9.1" + - name: kube-rbac-proxy + version: "0.9.1" diff --git a/internal/index/digest_test.go b/internal/index/digest_test.go index 8afc4fd09..531bb9329 100644 --- a/internal/index/digest_test.go +++ b/internal/index/digest_test.go @@ -49,6 +49,13 @@ func TestWithIndex(t *testing.T) { g.Expect(d.digests).To(BeEmpty()) }) + + t.Run("handles nil index", func(t *testing.T) { 
+ g := NewWithT(t) + d := &Digester{} + WithIndex(nil)(d) + g.Expect(d.index).To(BeNil()) + }) } func TestNewDigester(t *testing.T) { @@ -107,6 +114,13 @@ func TestDigester_Add(t *testing.T) { g.Expect(d.digests).To(BeEmpty()) }) + + t.Run("adds empty key and value", func(t *testing.T) { + g := NewWithT(t) + d := NewDigester() + d.Add("", "") + g.Expect(d.index).To(HaveKeyWithValue("", "")) + }) } func TestDigester_Delete(t *testing.T) { @@ -138,6 +152,14 @@ func TestDigester_Delete(t *testing.T) { d.Delete("foo") g.Expect(d.digests).To(BeEmpty()) }) + + t.Run("deletes non-existent key without error", func(t *testing.T) { + g := NewWithT(t) + d := NewDigester() + d.Delete("non-existent") + g.Expect(d.index).To(BeEmpty()) + g.Expect(d.digests).To(BeEmpty()) + }) } func TestDigester_Get(t *testing.T) { @@ -161,17 +183,26 @@ func TestDigester_Has(t *testing.T) { } func TestDigester_Index(t *testing.T) { - g := NewWithT(t) + t.Run("returns a copy of the index", func(t *testing.T) { + g := NewWithT(t) - i := map[string]string{ - "foo": "bar", - "bar": "baz", - } - d := NewDigester(WithIndex(i)) + i := map[string]string{ + "foo": "bar", + "bar": "baz", + } + d := NewDigester(WithIndex(i)) - iCopy := d.Index() - g.Expect(iCopy).To(Equal(i)) - g.Expect(iCopy).ToNot(BeIdenticalTo(i)) + iCopy := d.Index() + g.Expect(iCopy).To(Equal(i)) + g.Expect(iCopy).ToNot(BeIdenticalTo(i)) + }) + + t.Run("returns an empty copy for an empty index", func(t *testing.T) { + g := NewWithT(t) + d := NewDigester() + emptyIndex := d.Index() + g.Expect(emptyIndex).To(BeEmpty()) + }) } func TestDigester_Len(t *testing.T) { @@ -183,6 +214,8 @@ func TestDigester_Len(t *testing.T) { })) g.Expect(d.Len()).To(Equal(2)) + + g.Expect(NewDigester().Len()).To(Equal(0)) } func TestDigester_String(t *testing.T) { @@ -196,6 +229,8 @@ func TestDigester_String(t *testing.T) { g.Expect(d.String()).To(Equal(`bar baz foo bar `)) + + g.Expect(NewDigester().String()).To(Equal("")) } func TestDigester_WriteTo(t 
*testing.T) { diff --git a/internal/object/object.go b/internal/object/object.go index 105b40330..37f8ef9fe 100644 --- a/internal/object/object.go +++ b/internal/object/object.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/pkg/apis/meta" ) var ( @@ -148,7 +148,7 @@ func SetSuspend(obj runtime.Object, val bool) error { } // GetArtifact returns the status.artifact of a given runtime object. -func GetArtifact(obj runtime.Object) (*sourcev1.Artifact, error) { +func GetArtifact(obj runtime.Object) (*meta.Artifact, error) { u, err := toUnstructured(obj) if err != nil { return nil, err @@ -165,7 +165,7 @@ func GetArtifact(obj runtime.Object) (*sourcev1.Artifact, error) { if err != nil { return nil, err } - outArtifact := &sourcev1.Artifact{} + outArtifact := &meta.Artifact{} if err := json.Unmarshal(enc, outArtifact); err != nil { return nil, err } diff --git a/internal/object/object_test.go b/internal/object/object_test.go index 91932d11d..35cab3303 100644 --- a/internal/object/object_test.go +++ b/internal/object/object_test.go @@ -24,6 +24,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/fluxcd/pkg/apis/meta" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) @@ -127,7 +129,7 @@ func TestGetArtifact(t *testing.T) { g.Expect(artifact).To(BeNil()) // Get set artifact value. 
- obj.Status.Artifact = &sourcev1.Artifact{Path: "aaa", Revision: "zzz"} + obj.Status.Artifact = &meta.Artifact{Path: "aaa", Revision: "zzz"} artifact, err = GetArtifact(obj) g.Expect(err).ToNot(HaveOccurred()) g.Expect(artifact).ToNot(BeNil()) diff --git a/internal/oci/auth.go b/internal/oci/auth.go index 7b3eab896..6bd35c59e 100644 --- a/internal/oci/auth.go +++ b/internal/oci/auth.go @@ -18,14 +18,14 @@ package oci import ( "context" - "fmt" "strings" - "github.com/fluxcd/pkg/oci/auth/login" "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + "github.com/fluxcd/pkg/auth" + authutils "github.com/fluxcd/pkg/auth/utils" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) // Anonymous is an authn.AuthConfig that always returns an anonymous @@ -40,22 +40,7 @@ func (a Anonymous) Resolve(_ authn.Resource) (authn.Authenticator, error) { } // OIDCAuth generates the OIDC credential authenticator based on the specified cloud provider. -func OIDCAuth(ctx context.Context, url, provider string) (authn.Authenticator, error) { +func OIDCAuth(ctx context.Context, url, provider string, opts ...auth.Option) (authn.Authenticator, error) { u := strings.TrimPrefix(url, sourcev1.OCIRepositoryPrefix) - ref, err := name.ParseReference(u) - if err != nil { - return nil, fmt.Errorf("failed to parse URL '%s': %w", u, err) - } - - opts := login.ProviderOptions{} - switch provider { - case sourcev1.AmazonOCIProvider: - opts.AwsAutoLogin = true - case sourcev1.AzureOCIProvider: - opts.AzureAutoLogin = true - case sourcev1.GoogleOCIProvider: - opts.GcpAutoLogin = true - } - - return login.NewManager().Login(ctx, u, ref, opts) + return authutils.GetArtifactRegistryCredentials(ctx, provider, u, opts...) 
} diff --git a/internal/oci/cosign/cosign.go b/internal/oci/cosign/cosign.go new file mode 100644 index 000000000..75af33091 --- /dev/null +++ b/internal/oci/cosign/cosign.go @@ -0,0 +1,162 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cosign + +import ( + "context" + "crypto" + "fmt" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio" + coptions "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" + "github.com/sigstore/cosign/v2/cmd/cosign/cli/rekor" + "github.com/sigstore/cosign/v2/pkg/cosign" + ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote" + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/signature" + + soci "github.com/fluxcd/source-controller/internal/oci" +) + +// options is a struct that holds options for verifier. +type options struct { + publicKey []byte + rOpt []remote.Option + identities []cosign.Identity +} + +// Options is a function that configures the options applied to a Verifier. +type Options func(opts *options) + +// WithPublicKey sets the public key. 
+func WithPublicKey(publicKey []byte) Options { + return func(opts *options) { + opts.publicKey = publicKey + } +} + +// WithRemoteOptions is a functional option for overriding the default +// remote options used by the verifier. +func WithRemoteOptions(opts ...remote.Option) Options { + return func(o *options) { + o.rOpt = opts + } +} + +// WithIdentities specifies the identity matchers that have to be met +// for the signature to be deemed valid. +func WithIdentities(identities []cosign.Identity) Options { + return func(opts *options) { + opts.identities = identities + } +} + +// CosignVerifier is a struct which is responsible for executing verification logic. +type CosignVerifier struct { + opts *cosign.CheckOpts +} + +// NewCosignVerifier initializes a new CosignVerifier. +func NewCosignVerifier(ctx context.Context, opts ...Options) (*CosignVerifier, error) { + o := options{} + for _, opt := range opts { + opt(&o) + } + + checkOpts := &cosign.CheckOpts{} + + ro := coptions.RegistryOptions{} + co, err := ro.ClientOpts(ctx) + if err != nil { + return nil, err + } + + checkOpts.Identities = o.identities + if o.rOpt != nil { + co = append(co, ociremote.WithRemoteOptions(o.rOpt...)) + } + + checkOpts.RegistryClientOpts = co + + // If a public key is provided, it will use it to verify the signature. + // If there is no public key provided, it will try keyless verification. + // https://github.com/sigstore/cosign/blob/main/KEYLESS.md. + if len(o.publicKey) > 0 { + checkOpts.Offline = true + // TODO(hidde): this is an oversight in our implementation. As it is + // theoretically possible to have a custom PK, without disabling tlog. 
+ checkOpts.IgnoreTlog = true + + pubKeyRaw, err := cryptoutils.UnmarshalPEMToPublicKey(o.publicKey) + if err != nil { + return nil, err + } + + checkOpts.SigVerifier, err = signature.LoadVerifier(pubKeyRaw, crypto.SHA256) + if err != nil { + return nil, err + } + } else { + checkOpts.RekorClient, err = rekor.NewClient(coptions.DefaultRekorURL) + if err != nil { + return nil, fmt.Errorf("unable to create Rekor client: %w", err) + } + + // This performs an online fetch of the Rekor public keys, but this is needed + // for verifying tlog entries (both online and offline). + // TODO(hidde): above note is important to keep in mind when we implement + // "offline" tlog above. + if checkOpts.RekorPubKeys, err = cosign.GetRekorPubs(ctx); err != nil { + return nil, fmt.Errorf("unable to get Rekor public keys: %w", err) + } + + checkOpts.CTLogPubKeys, err = cosign.GetCTLogPubs(ctx) + if err != nil { + return nil, fmt.Errorf("unable to get CTLog public keys: %w", err) + } + + if checkOpts.RootCerts, err = fulcio.GetRoots(); err != nil { + return nil, fmt.Errorf("unable to get Fulcio root certs: %w", err) + } + + if checkOpts.IntermediateCerts, err = fulcio.GetIntermediates(); err != nil { + return nil, fmt.Errorf("unable to get Fulcio intermediate certs: %w", err) + } + } + + return &CosignVerifier{ + opts: checkOpts, + }, nil +} + +// Verify verifies the authenticity of the given ref OCI image. +// It returns a boolean indicating if the verification was successful. +// It returns an error if the verification fails, nil otherwise. 
+func (v *CosignVerifier) Verify(ctx context.Context, ref name.Reference) (soci.VerificationResult, error) { + signatures, _, err := cosign.VerifyImageSignatures(ctx, ref, v.opts) + if err != nil { + return soci.VerificationResultFailed, err + } + + if len(signatures) == 0 { + return soci.VerificationResultFailed, nil + } + + return soci.VerificationResultSuccess, nil +} diff --git a/internal/oci/verifier_test.go b/internal/oci/cosign/cosign_test.go similarity index 54% rename from internal/oci/verifier_test.go rename to internal/oci/cosign/cosign_test.go index 114601616..f99e7d1f6 100644 --- a/internal/oci/verifier_test.go +++ b/internal/oci/cosign/cosign_test.go @@ -14,16 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -package oci +package cosign import ( + "context" + "fmt" "net/http" + "net/url" "reflect" "testing" "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/v1/remote" + . 
"github.com/onsi/gomega" "github.com/sigstore/cosign/v2/pkg/cosign" + + testproxy "github.com/fluxcd/source-controller/tests/proxy" + testregistry "github.com/fluxcd/source-controller/tests/registry" ) func TestOptions(t *testing.T) { @@ -38,15 +46,15 @@ func TestOptions(t *testing.T) { name: "signature option", opts: []Options{WithPublicKey([]byte("foo"))}, want: &options{ - PublicKey: []byte("foo"), - ROpt: nil, + publicKey: []byte("foo"), + rOpt: nil, }, }, { name: "keychain option", opts: []Options{WithRemoteOptions(remote.WithAuthFromKeychain(authn.DefaultKeychain))}, want: &options{ - PublicKey: nil, - ROpt: []remote.Option{remote.WithAuthFromKeychain(authn.DefaultKeychain)}, + publicKey: nil, + rOpt: []remote.Option{remote.WithAuthFromKeychain(authn.DefaultKeychain)}, }, }, { name: "keychain and authenticator option", @@ -55,8 +63,8 @@ func TestOptions(t *testing.T) { remote.WithAuthFromKeychain(authn.DefaultKeychain), )}, want: &options{ - PublicKey: nil, - ROpt: []remote.Option{ + publicKey: nil, + rOpt: []remote.Option{ remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), remote.WithAuthFromKeychain(authn.DefaultKeychain), }, @@ -69,8 +77,8 @@ func TestOptions(t *testing.T) { remote.WithTransport(http.DefaultTransport), )}, want: &options{ - PublicKey: nil, - ROpt: []remote.Option{ + publicKey: nil, + rOpt: []remote.Option{ remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), remote.WithAuthFromKeychain(authn.DefaultKeychain), remote.WithTransport(http.DefaultTransport), @@ -89,7 +97,7 @@ func TestOptions(t *testing.T) { }, })}, want: &options{ - Identities: []cosign.Identity{ + identities: []cosign.Identity{ { SubjectRegExp: "test-user", IssuerRegExp: "^https://token.actions.githubusercontent.com$", @@ -109,22 +117,77 @@ func TestOptions(t *testing.T) { for _, opt := range test.opts { opt(&o) } - if !reflect.DeepEqual(o.PublicKey, test.want.PublicKey) { - t.Errorf("got %#v, 
want %#v", &o.PublicKey, test.want.PublicKey) + if !reflect.DeepEqual(o.publicKey, test.want.publicKey) { + t.Errorf("got %#v, want %#v", &o.publicKey, test.want.publicKey) } - if test.want.ROpt != nil { - if len(o.ROpt) != len(test.want.ROpt) { - t.Errorf("got %d remote options, want %d", len(o.ROpt), len(test.want.ROpt)) + if test.want.rOpt != nil { + if len(o.rOpt) != len(test.want.rOpt) { + t.Errorf("got %d remote options, want %d", len(o.rOpt), len(test.want.rOpt)) } return } - if test.want.ROpt == nil { - if len(o.ROpt) != 0 { - t.Errorf("got %d remote options, want %d", len(o.ROpt), 0) + if test.want.rOpt == nil { + if len(o.rOpt) != 0 { + t.Errorf("got %d remote options, want %d", len(o.rOpt), 0) } } }) } } + +func TestPrivateKeyVerificationWithProxy(t *testing.T) { + g := NewWithT(t) + + registryAddr := testregistry.New(t) + + tagURL := fmt.Sprintf("%s/fluxcd/source-controller:v1.3.0", registryAddr) + ref, err := name.ParseReference(tagURL) + g.Expect(err).NotTo(HaveOccurred()) + + proxyAddr, proxyPort := testproxy.New(t) + + keys, err := cosign.GenerateKeyPair(func(b bool) ([]byte, error) { + return []byte("cosign-password"), nil + }) + g.Expect(err).NotTo(HaveOccurred()) + + tests := []struct { + name string + proxyURL *url.URL + err string + }{ + { + name: "with correct proxy", + proxyURL: &url.URL{Scheme: "http", Host: proxyAddr}, + err: "image tag not found", + }, + { + name: "with incorrect proxy", + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}, + err: "connection refused", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.Proxy = http.ProxyURL(tt.proxyURL) + + var opts []Options + opts = append(opts, WithRemoteOptions(remote.WithTransport(transport))) + opts = append(opts, WithPublicKey(keys.PublicBytes)) + + verifier, err := NewCosignVerifier(ctx, opts...) 
+ g.Expect(err).NotTo(HaveOccurred()) + + _, err = verifier.Verify(ctx, ref) + g.Expect(err.Error()).To(ContainSubstring(tt.err)) + }) + } +} diff --git a/internal/oci/notation/notation.go b/internal/oci/notation/notation.go new file mode 100644 index 000000000..0158ffd03 --- /dev/null +++ b/internal/oci/notation/notation.go @@ -0,0 +1,404 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package notation + +import ( + "context" + "crypto/x509" + "encoding/pem" + "fmt" + "net/http" + "strings" + + "github.com/go-logr/logr" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + _ "github.com/notaryproject/notation-core-go/signature/cose" + _ "github.com/notaryproject/notation-core-go/signature/jws" + "github.com/notaryproject/notation-go" + "github.com/notaryproject/notation-go/registry" + verifier "github.com/notaryproject/notation-go/verifier" + "github.com/notaryproject/notation-go/verifier/trustpolicy" + "github.com/notaryproject/notation-go/verifier/truststore" + oras "oras.land/oras-go/v2/registry/remote" + oauth "oras.land/oras-go/v2/registry/remote/auth" + retryhttp "oras.land/oras-go/v2/registry/remote/retry" + + 
"github.com/fluxcd/source-controller/internal/helm/common" + "github.com/fluxcd/source-controller/internal/oci" +) + +// name of the trustpolicy file defined in the Secret containing +// notation public keys. +const DefaultTrustPolicyKey = "trustpolicy.json" + +// options is a struct that holds options for verifier. +type options struct { + rootCertificates [][]byte + rOpt []remote.Option + trustPolicy *trustpolicy.Document + auth authn.Authenticator + keychain authn.Keychain + insecure bool + logger logr.Logger + transport *http.Transport +} + +// Options is a function that configures the options applied to a Verifier. +type Options func(opts *options) + +// WithInsecureRegistry sets notation to verify against insecure registry. +func WithInsecureRegistry(insecure bool) Options { + return func(opts *options) { + opts.insecure = insecure + } +} + +// WithTrustPolicy sets the trust policy configuration. +func WithTrustPolicy(trustPolicy *trustpolicy.Document) Options { + return func(opts *options) { + opts.trustPolicy = trustPolicy + } +} + +// WithRootCertificates is a functional option for overriding the default +// rootCertificate options used by the verifier to set the root CA certificate for notary. +// It takes in a list of certificate data as an array of byte slices. +// The function returns a options function option that sets the public certificate +// in the notation options. 
+func WithRootCertificates(data [][]byte) Options { + return func(opts *options) { + opts.rootCertificates = data + } +} + +// WithRemoteOptions is a functional option for overriding the default +// remote options used by the verifier +func WithRemoteOptions(opts ...remote.Option) Options { + return func(o *options) { + o.rOpt = opts + } +} + +// WithAuth is a functional option for overriding the default +// authenticator options used by the verifier +func WithAuth(auth authn.Authenticator) Options { + return func(o *options) { + o.auth = auth + } +} + +// WithKeychain is a functional option for overriding the default +// keychain options used by the verifier +func WithKeychain(key authn.Keychain) Options { + return func(o *options) { + o.keychain = key + } +} + +// WithLogger is a function that returns an Options function to set the logger for the options. +// The logger is used for logging purposes within the options. +func WithLogger(logger logr.Logger) Options { + return func(o *options) { + o.logger = logger + } +} + +// WithTransport is a function that returns an Options function to set the transport for the options. +func WithTransport(transport *http.Transport) Options { + return func(o *options) { + o.transport = transport + } +} + +// NotationVerifier is a struct which is responsible for executing verification logic +type NotationVerifier struct { + auth authn.Authenticator + keychain authn.Keychain + verifier *notation.Verifier + opts []remote.Option + insecure bool + logger logr.Logger + transport *http.Transport +} + +var _ truststore.X509TrustStore = &trustStore{} + +// trustStore is used by notation-go/verifier to retrieve the root certificate for notary. +// The default behaviour is to read the certificate from disk and return it as a byte slice. +// The reason for implementing the interface here is to avoid reading the certificate from disk +// as the certificate is already available in memory. 
+type trustStore struct { + certs [][]byte +} + +// GetCertificates implements truststore.X509TrustStore. +func (s trustStore) GetCertificates(ctx context.Context, storeType truststore.Type, namedStore string) ([]*x509.Certificate, error) { + certs := []*x509.Certificate{} + for _, data := range s.certs { + raw := data + block, _ := pem.Decode(raw) + if block != nil { + raw = block.Bytes + } + + cert, err := x509.ParseCertificates(raw) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate '%s': %s", namedStore, err) + } + + certs = append(certs, cert...) + } + + return certs, nil +} + +// NewNotationVerifier initializes a new Verifier +func NewNotationVerifier(opts ...Options) (*NotationVerifier, error) { + o := options{} + for _, opt := range opts { + opt(&o) + } + + store := &trustStore{ + certs: o.rootCertificates, + } + + trustpolicy := o.trustPolicy + if trustpolicy == nil { + return nil, fmt.Errorf("trust policy cannot be empty") + } + + verifier, err := verifier.New(trustpolicy, store, nil) + if err != nil { + return nil, err + } + + return &NotationVerifier{ + auth: o.auth, + keychain: o.keychain, + verifier: &verifier, + opts: o.rOpt, + insecure: o.insecure, + logger: o.logger, + transport: o.transport, + }, nil +} + +// CleanTrustPolicy cleans the given trust policy by removing trust stores and trusted identities +// for trust policy statements that are set to skip signature verification but still have configured trust stores and/or trusted identities. +// It takes a pointer to a trustpolicy.Document and a logger from the logr package as input parameters. +// If the trustPolicy is nil, it returns nil. +// Otherwise, it iterates over the trustPolicy.TrustPolicies and checks if each trust policy statement's +// SignatureVerification.VerificationLevel is set to trustpolicy.LevelSkip.Name. +// If it is, it logs a warning message and removes the trust stores and trusted identities for that trust policy statement. 
+// Finally, it returns the modified trustPolicy.
+func CleanTrustPolicy(trustPolicy *trustpolicy.Document, logger logr.Logger) *trustpolicy.Document {
+	if trustPolicy == nil {
+		return nil
+	}
+
+	for i, j := range trustPolicy.TrustPolicies {
+		if j.SignatureVerification.VerificationLevel == trustpolicy.LevelSkip.Name {
+			if len(j.TrustStores) > 0 || len(j.TrustedIdentities) > 0 {
+				logger.Info(fmt.Sprintf("warning: trust policy statement '%s' is set to skip signature verification but configured with trust stores and/or trusted identities. Ignoring trust stores and trusted identities", j.Name))
+			}
+			trustPolicy.TrustPolicies[i].TrustStores = []string{}
+			trustPolicy.TrustPolicies[i].TrustedIdentities = []string{}
+		}
+	}
+
+	return trustPolicy
+}
+
+// Verify verifies the authenticity of the given ref OCI image.
+// It returns a VerificationResult indicating the outcome of the verification
+// (success, failed or ignored), and an error if verification could not be performed.
+func (v *NotationVerifier) Verify(ctx context.Context, ref name.Reference) (oci.VerificationResult, error) {
+	url := ref.Name()
+
+	remoteRepo, err := v.remoteRepo(url)
+	if err != nil {
+		return oci.VerificationResultFailed, err
+	}
+
+	repo := registry.NewRepository(remoteRepo)
+
+	repoUrl, err := v.repoUrlWithDigest(url, ref)
+	if err != nil {
+		return oci.VerificationResultFailed, err
+	}
+
+	verifyOptions := notation.VerifyOptions{
+		ArtifactReference:    repoUrl,
+		MaxSignatureAttempts: 3,
+	}
+
+	_, outcomes, err := notation.Verify(ctx, *v.verifier, repo, verifyOptions)
+	if err != nil {
+		return oci.VerificationResultFailed, err
+	}
+
+	return v.checkOutcome(outcomes, url)
+}
+
+// checkOutcome checks the verification outcomes for a given URL and returns the corresponding OCI verification result.
+// It takes a slice of verification outcomes and a URL as input parameters.
+// If there are no verification outcomes, it returns a failed verification result with an error message.
+// If the first verification outcome has a verification level of "trustpolicy.LevelSkip", it returns an ignored verification result. +// This function assumes that "trustpolicy.TypeIntegrity" is always enforced. It will return a successful validation result if "trustpolicy.TypeAuthenticity" is successful too. +// If any of the verification results have an error, it logs the error message and sets the "ignore" flag to true if the error type is "trustpolicy.TypeAuthenticity". +// If the "ignore" flag is true, it returns an ignored verification result. +// Otherwise, it returns a successful verification result. +// The function returns the OCI verification result and an error, if any. +func (v *NotationVerifier) checkOutcome(outcomes []*notation.VerificationOutcome, url string) (oci.VerificationResult, error) { + if len(outcomes) == 0 { + return oci.VerificationResultFailed, fmt.Errorf("signature verification failed for all the signatures associated with %s", url) + } + + // should only ever be one item in the outcomes slice + outcome := outcomes[0] + + // if the verification level is set to skip, we ignore the verification result + // as there should be no verification results in outcome and we do not want + // to mark the result as verified + if outcome.VerificationLevel == trustpolicy.LevelSkip { + return oci.VerificationResultIgnored, nil + } + + ignore := false + + // loop through verification results to check for errors + for _, i := range outcome.VerificationResults { + // error if action is not marked as `skip` and there is an error + if i.Error != nil { + // flag to ignore the verification result if the error is related to type `authenticity` + if i.Type == trustpolicy.TypeAuthenticity { + ignore = true + } + // log results of error + v.logger.Info(fmt.Sprintf("verification check for type '%s' failed for '%s' with message: '%s'", i.Type, url, i.Error.Error())) + } + } + + // if the ignore flag is set, we ignore the verification result so not to mark as 
verified + if ignore { + return oci.VerificationResultIgnored, nil + } + + // result is okay to mark as verified + return oci.VerificationResultSuccess, nil +} + +// remoteRepo is a function that creates a remote repository object for the given repository URL. +// It initializes the repository with the provided URL and sets the PlainHTTP flag based on the value of the 'insecure' field in the Verifier struct. +// It also sets up the credential provider based on the authentication configuration provided in the Verifier struct. +// If authentication is required, it retrieves the authentication credentials and sets up the repository client with the appropriate headers and credentials. +// Finally, it returns the remote repository object and any error encountered during the process. +func (v *NotationVerifier) remoteRepo(repoUrl string) (*oras.Repository, error) { + remoteRepo, err := oras.NewRepository(repoUrl) + if err != nil { + return &oras.Repository{}, err + } + + remoteRepo.PlainHTTP = v.insecure + + credentialProvider := func(ctx context.Context, registry string) (oauth.Credential, error) { + return oauth.EmptyCredential, nil + } + + auth := authn.Anonymous + + if v.auth != nil { + auth = v.auth + } else if v.keychain != nil { + source := common.StringResource{Registry: repoUrl} + + auth, err = v.keychain.Resolve(source) + if err != nil { + return &oras.Repository{}, err + } + } + + if auth != authn.Anonymous { + authConfig, err := auth.Authorization() + if err != nil { + return &oras.Repository{}, err + } + + credentialProvider = func(ctx context.Context, registry string) (oauth.Credential, error) { + if authConfig.Username != "" || authConfig.Password != "" || authConfig.IdentityToken != "" || authConfig.RegistryToken != "" { + return oauth.Credential{ + Username: authConfig.Username, + Password: authConfig.Password, + RefreshToken: authConfig.IdentityToken, + AccessToken: authConfig.RegistryToken, + }, nil + } + return oauth.EmptyCredential, nil + } + } + + 
hc := retryhttp.DefaultClient + if v.transport != nil { + hc = &http.Client{ + Transport: retryhttp.NewTransport(v.transport), + } + } + repoClient := &oauth.Client{ + Client: hc, + Header: http.Header{ + "User-Agent": {"flux"}, + }, + Credential: credentialProvider, + } + + remoteRepo.Client = repoClient + + return remoteRepo, nil +} + +// repoUrlWithDigest takes a repository URL and a reference and returns the repository URL with the digest appended to it. +// If the repository URL does not contain a tag or digest, it returns an error. +func (v *NotationVerifier) repoUrlWithDigest(repoUrl string, ref name.Reference) (string, error) { + if !strings.Contains(repoUrl, "@") { + image, err := remote.Image(ref, v.opts...) + if err != nil { + return "", err + } + + digest, err := image.Digest() + if err != nil { + return "", err + } + + lastIndex := strings.LastIndex(repoUrl, ":") + if lastIndex == -1 { + return "", fmt.Errorf("url %s does not contain tag or digest", repoUrl) + } + + firstPart := repoUrl[:lastIndex] + + if s := strings.Split(repoUrl, ":"); len(s) >= 2 { + repoUrl = fmt.Sprintf("%s@%s", firstPart, digest) + } else { + return "", fmt.Errorf("url %s does not contain tag or digest", repoUrl) + } + } + return repoUrl, nil +} diff --git a/internal/oci/notation/notation_test.go b/internal/oci/notation/notation_test.go new file mode 100644 index 000000000..cdd8a3872 --- /dev/null +++ b/internal/oci/notation/notation_test.go @@ -0,0 +1,651 @@ +/* +Copyright 2023 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package notation + +import ( + "context" + "fmt" + "net/http" + "net/url" + "path" + "reflect" + "testing" + + "github.com/go-logr/logr" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/notaryproject/notation-go" + "github.com/notaryproject/notation-go/verifier/trustpolicy" + . "github.com/onsi/gomega" + + "github.com/fluxcd/source-controller/internal/oci" + testproxy "github.com/fluxcd/source-controller/tests/proxy" + testregistry "github.com/fluxcd/source-controller/tests/registry" +) + +func TestOptions(t *testing.T) { + testCases := []struct { + name string + opts []Options + want *options + }{ + { + name: "no options", + want: &options{}, + }, + { + name: "signature option", + opts: []Options{WithRootCertificates([][]byte{[]byte("foo")})}, + want: &options{ + rootCertificates: [][]byte{[]byte("foo")}, + rOpt: nil, + }, + }, + { + name: "keychain option", + opts: []Options{ + WithRemoteOptions(remote.WithAuthFromKeychain(authn.DefaultKeychain)), + WithKeychain(authn.DefaultKeychain), + }, + want: &options{ + rootCertificates: nil, + rOpt: []remote.Option{remote.WithAuthFromKeychain(authn.DefaultKeychain)}, + keychain: authn.DefaultKeychain, + }, + }, + { + name: "keychain and authenticator option", + opts: []Options{ + WithRemoteOptions( + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + ), + WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + WithKeychain(authn.DefaultKeychain), + }, + want: &options{ + rootCertificates: nil, + rOpt: []remote.Option{ + remote.WithAuth(&authn.Basic{Username: "foo", Password: 
"bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + }, + auth: &authn.Basic{Username: "foo", Password: "bar"}, + keychain: authn.DefaultKeychain, + }, + }, + { + name: "keychain, authenticator and transport option", + opts: []Options{ + WithRemoteOptions( + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + remote.WithTransport(http.DefaultTransport), + ), + WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + WithKeychain(authn.DefaultKeychain), + }, + want: &options{ + rootCertificates: nil, + rOpt: []remote.Option{ + remote.WithAuth(&authn.Basic{Username: "foo", Password: "bar"}), + remote.WithAuthFromKeychain(authn.DefaultKeychain), + remote.WithTransport(http.DefaultTransport), + }, + auth: &authn.Basic{Username: "foo", Password: "bar"}, + keychain: authn.DefaultKeychain, + }, + }, + { + name: "truststore, empty document", + opts: []Options{WithTrustPolicy(&trustpolicy.Document{})}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: &trustpolicy.Document{}, + }, + }, + { + name: "truststore, dummy document", + opts: []Options{WithTrustPolicy(dummyPolicyDocument())}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: dummyPolicyDocument(), + }, + }, + { + name: "insecure, false", + opts: []Options{WithInsecureRegistry(false)}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: nil, + insecure: false, + }, + }, + { + name: "insecure, true", + opts: []Options{WithInsecureRegistry(true)}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: nil, + insecure: true, + }, + }, + { + name: "insecure, default", + opts: []Options{}, + want: &options{ + rootCertificates: nil, + rOpt: nil, + trustPolicy: nil, + insecure: false, + }, + }, + } + + // Run the test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + o := options{} + for _, opt := range tc.opts { + opt(&o) + } + if 
!reflect.DeepEqual(o.rootCertificates, tc.want.rootCertificates) { + t.Errorf("got %#v, want %#v", &o.rootCertificates, tc.want.rootCertificates) + } + + if !reflect.DeepEqual(o.trustPolicy, tc.want.trustPolicy) { + t.Errorf("got %#v, want %#v", &o.trustPolicy, tc.want.trustPolicy) + } + + if tc.want.rOpt != nil { + if len(o.rOpt) != len(tc.want.rOpt) { + t.Errorf("got %d remote options, want %d", len(o.rOpt), len(tc.want.rOpt)) + } + return + } + + if tc.want.rOpt == nil { + if len(o.rOpt) != 0 { + t.Errorf("got %d remote options, want %d", len(o.rOpt), 0) + } + } + }) + } +} + +func TestCleanTrustPolicy(t *testing.T) { + testCases := []struct { + name string + policy []trustpolicy.TrustPolicy + want *trustpolicy.Document + wantLogMessage string + }{ + { + name: "no trust policy", + want: nil, + }, + { + name: "trust policy verification level set to strict and should not be cleaned", + policy: []trustpolicy.TrustPolicy{{ + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: nil, + }}, + want: &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{{ + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: nil, + }}, + }, + }, + { + name: "trust policy with multiple policies and should not be cleaned", + policy: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + { + Name: "test-statement-name-2", + RegistryScopes: []string{"example.com/podInfo"}, + SignatureVerification: 
trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: nil, + }, + }, + want: &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + { + Name: "test-statement-name-2", + RegistryScopes: []string{"example.com/podInfo"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: nil, + }, + }, + }, + }, + { + name: "trust policy verification level skip should be cleaned", + policy: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "skip"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + }, + want: &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "skip"}, + TrustStores: []string{}, + TrustedIdentities: []string{}, + }, + }, + }, + wantLogMessage: "warning: trust policy statement 'test-statement-name' is set to skip signature verification but configured with trust stores and/or trusted identities. 
Ignoring trust stores and trusted identities", + }, + { + name: "trust policy with multiple policies and mixture of verification levels including skip", + policy: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + { + Name: "test-statement-name-2", + RegistryScopes: []string{"example.com/podInfo"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "skip"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + }, + want: &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{ + { + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"test"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + }, + { + Name: "test-statement-name-2", + RegistryScopes: []string{"example.com/podInfo"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "skip"}, + TrustStores: []string{}, + TrustedIdentities: []string{}, + }, + }, + }, + wantLogMessage: "warning: trust policy statement 'test-statement-name-2' is set to skip signature verification but configured with trust stores and/or trusted identities. 
Ignoring trust stores and trusted identities", + }, + } + + // Run the test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + l := &testLogger{[]string{}, logr.RuntimeInfo{CallDepth: 1}} + logger := logr.New(l) + + var policy *trustpolicy.Document + + if tc.policy != nil { + policy = &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: tc.policy, + } + } + + cleanedPolicy := CleanTrustPolicy(policy, logger) + + if !reflect.DeepEqual(cleanedPolicy, tc.want) { + t.Errorf("got %#v, want %#v", cleanedPolicy, tc.want) + } + + if tc.wantLogMessage != "" { + g.Expect(len(l.Output)).Should(Equal(1)) + g.Expect(l.Output[0]).Should(Equal(tc.wantLogMessage)) + } + }) + } +} + +func TestOutcomeChecker(t *testing.T) { + testCases := []struct { + name string + outcome []*notation.VerificationOutcome + wantErrMessage string + wantLogMessage []string + wantVerificationResult oci.VerificationResult + }{ + { + name: "no outcome failed with error message", + wantVerificationResult: oci.VerificationResultFailed, + wantErrMessage: "signature verification failed for all the signatures associated with example.com/podInfo", + }, + { + name: "verification result ignored with log message", + outcome: []*notation.VerificationOutcome{ + { + VerificationLevel: trustpolicy.LevelAudit, + VerificationResults: []*notation.ValidationResult{ + { + Type: trustpolicy.TypeAuthenticity, + Action: trustpolicy.ActionLog, + Error: fmt.Errorf("123"), + }, + }, + }, + }, + wantVerificationResult: oci.VerificationResultIgnored, + wantLogMessage: []string{"verification check for type 'authenticity' failed for 'example.com/podInfo' with message: '123'"}, + }, + { + name: "verification result ignored with no log message (skip)", + outcome: []*notation.VerificationOutcome{ + { + VerificationLevel: trustpolicy.LevelSkip, + VerificationResults: []*notation.ValidationResult{}, + }, + }, + wantVerificationResult: oci.VerificationResultIgnored, + }, + { + name: 
"verification result success with log message", + outcome: []*notation.VerificationOutcome{ + { + VerificationLevel: trustpolicy.LevelAudit, + VerificationResults: []*notation.ValidationResult{ + { + Type: trustpolicy.TypeAuthenticTimestamp, + Action: trustpolicy.ActionLog, + Error: fmt.Errorf("456"), + }, + { + Type: trustpolicy.TypeExpiry, + Action: trustpolicy.ActionLog, + Error: fmt.Errorf("789"), + }, + }, + }, + }, + wantVerificationResult: oci.VerificationResultSuccess, + wantLogMessage: []string{ + "verification check for type 'authenticTimestamp' failed for 'example.com/podInfo' with message: '456'", + "verification check for type 'expiry' failed for 'example.com/podInfo' with message: '789'", + }, + }, + { + name: "verification result success with no log message", + outcome: []*notation.VerificationOutcome{ + { + VerificationLevel: trustpolicy.LevelAudit, + VerificationResults: []*notation.ValidationResult{}, + }, + }, + wantVerificationResult: oci.VerificationResultSuccess, + }, + } + + // Run the test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + l := &testLogger{[]string{}, logr.RuntimeInfo{CallDepth: 1}} + logger := logr.New(l) + + v := NotationVerifier{ + logger: logger, + } + + result, err := v.checkOutcome(tc.outcome, "example.com/podInfo") + + if tc.wantErrMessage != "" { + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).Should(Equal(tc.wantErrMessage)) + } else { + g.Expect(err).To(BeNil()) + } + + g.Expect(result).Should(Equal(tc.wantVerificationResult)) + g.Expect(len(l.Output)).Should(Equal(len(tc.wantLogMessage))) + + for i, j := range tc.wantLogMessage { + g.Expect(l.Output[i]).Should(Equal(j)) + } + }) + } +} + +func TestRepoUrlWithDigest(t *testing.T) { + testCases := []struct { + name string + repoUrl string + digest string + tag string + wantResultUrl string + wantErrMessage string + passUrlWithoutTag bool + }{ + { + name: "valid repo url with digest", + repoUrl: 
"ghcr.io/stefanprodan/charts/podinfo", + digest: "sha256:cdd538a0167e4b51152b71a477e51eb6737553510ce8797dbcc537e1342311bb", + wantResultUrl: "ghcr.io/stefanprodan/charts/podinfo@sha256:cdd538a0167e4b51152b71a477e51eb6737553510ce8797dbcc537e1342311bb", + wantErrMessage: "", + }, + { + name: "valid repo url with tag", + repoUrl: "ghcr.io/stefanprodan/charts/podinfo", + tag: "6.6.0", + wantResultUrl: "ghcr.io/stefanprodan/charts/podinfo@sha256:cdd538a0167e4b51152b71a477e51eb6737553510ce8797dbcc537e1342311bb", + wantErrMessage: "", + }, + { + name: "valid repo url without tag", + repoUrl: "ghcr.io/stefanprodan/charts/podinfo", + tag: "6.6.0", + wantResultUrl: "ghcr.io/stefanprodan/charts/podinfo@sha256:cdd538a0167e4b51152b71a477e51eb6737553510ce8797dbcc537e1342311bb", + wantErrMessage: "url ghcr.io/stefanprodan/charts/podinfo does not contain tag or digest", + passUrlWithoutTag: true, + }, + } + + // Run the test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + l := &testLogger{[]string{}, logr.RuntimeInfo{CallDepth: 1}} + logger := logr.New(l) + + v := NotationVerifier{ + logger: logger, + } + + var url string + repo, _ := name.NewRepository(tc.repoUrl) + var ref name.Reference + if tc.digest != "" { + ref = repo.Digest(tc.digest) + url = fmt.Sprintf("%s@%s", tc.repoUrl, tc.digest) + } else if tc.tag != "" { + ref = repo.Tag(tc.tag) + if !tc.passUrlWithoutTag { + url = fmt.Sprintf("%s:%s", tc.repoUrl, tc.tag) + } else { + url = tc.repoUrl + } + } else { + ref = repo.Tag(name.DefaultTag) + url = fmt.Sprintf("%s:%s", tc.repoUrl, name.DefaultTag) + } + + result, err := v.repoUrlWithDigest(url, ref) + + if tc.wantErrMessage != "" { + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).Should(Equal(tc.wantErrMessage)) + } else { + g.Expect(err).To(BeNil()) + g.Expect(result).Should(Equal(tc.wantResultUrl)) + } + }) + } +} + +func TestVerificationWithProxy(t *testing.T) { + g := NewWithT(t) + + registryAddr := 
testregistry.New(t) + + tarFilePath := path.Join("..", "..", "controller", "testdata", "podinfo", "podinfo-6.1.5.tar") + _, err := testregistry.CreatePodinfoImageFromTar(tarFilePath, "6.1.5", registryAddr) + g.Expect(err).NotTo(HaveOccurred()) + + tagURL := fmt.Sprintf("%s/podinfo:6.1.5", registryAddr) + ref, err := name.ParseReference(tagURL) + g.Expect(err).NotTo(HaveOccurred()) + + proxyAddr, proxyPort := testproxy.New(t) + + tests := []struct { + name string + proxyURL *url.URL + err string + }{ + { + name: "with correct proxy", + proxyURL: &url.URL{Scheme: "http", Host: proxyAddr}, + err: "no signature is associated with", + }, + { + name: "with incorrect proxy", + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}, + err: "connection refused", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.Proxy = http.ProxyURL(tt.proxyURL) + + var opts []Options + opts = append(opts, WithTransport(transport)) + opts = append(opts, WithTrustPolicy(dummyPolicyDocument())) + opts = append(opts, WithInsecureRegistry(true)) + + verifier, err := NewNotationVerifier(opts...) 
+ g.Expect(err).NotTo(HaveOccurred()) + + _, err = verifier.Verify(ctx, ref) + g.Expect(err.Error()).To(ContainSubstring(tt.err)) + }) + } +} + +func dummyPolicyDocument() (policyDoc *trustpolicy.Document) { + policyDoc = &trustpolicy.Document{ + Version: "1.0", + TrustPolicies: []trustpolicy.TrustPolicy{dummyPolicyStatement()}, + } + return +} + +func dummyPolicyStatement() (policyStatement trustpolicy.TrustPolicy) { + policyStatement = trustpolicy.TrustPolicy{ + Name: "test-statement-name", + RegistryScopes: []string{"*"}, + SignatureVerification: trustpolicy.SignatureVerification{VerificationLevel: "strict"}, + TrustStores: []string{"ca:valid-trust-store", "signingAuthority:valid-trust-store"}, + TrustedIdentities: []string{"x509.subject:CN=Notation Test Root,O=Notary,L=Seattle,ST=WA,C=US"}, + } + return +} + +// mocking LogSink to capture log messages. Source: https://stackoverflow.com/a/71425740 +type testLogger struct { + Output []string + r logr.RuntimeInfo +} + +func (t *testLogger) doLog(msg string) { + t.Output = append(t.Output, msg) +} + +func (t *testLogger) Init(info logr.RuntimeInfo) { + t.r = info +} + +func (t *testLogger) Enabled(level int) bool { + return true +} + +func (t *testLogger) Info(level int, msg string, keysAndValues ...interface{}) { + t.doLog(msg) +} + +func (t *testLogger) Error(err error, msg string, keysAndValues ...interface{}) { + t.doLog(msg) +} + +func (t *testLogger) WithValues(keysAndValues ...interface{}) logr.LogSink { + return t +} + +func (t *testLogger) WithName(name string) logr.LogSink { + return t +} diff --git a/internal/oci/verifier.go b/internal/oci/verifier.go index 2fb304e4e..eeb301eb0 100644 --- a/internal/oci/verifier.go +++ b/internal/oci/verifier.go @@ -18,154 +18,25 @@ package oci import ( "context" - "crypto" - "fmt" "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote" - 
"github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio" - coptions "github.com/sigstore/cosign/v2/cmd/cosign/cli/options" - "github.com/sigstore/cosign/v2/cmd/cosign/cli/rekor" - "github.com/sigstore/cosign/v2/pkg/cosign" - "github.com/sigstore/cosign/v2/pkg/oci" - ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote" - "github.com/sigstore/sigstore/pkg/cryptoutils" - "github.com/sigstore/sigstore/pkg/signature" +) + +// VerificationResult represents the result of a verification process. +type VerificationResult string + +const ( + // VerificationResultSuccess indicates that the artifact has been verified. + VerificationResultSuccess VerificationResult = "verified" + // VerificationResultFailed indicates that the artifact could not be verified. + VerificationResultFailed VerificationResult = "unverified" + // VerificationResultIgnored indicates that the artifact has not been verified + // but is allowed to proceed. This is used primarily when notation is used + // as the verifier. + VerificationResultIgnored VerificationResult = "ignored" ) // Verifier is an interface for verifying the authenticity of an OCI image. type Verifier interface { - Verify(ctx context.Context, ref name.Reference) (bool, error) -} - -// options is a struct that holds options for verifier. -type options struct { - PublicKey []byte - ROpt []remote.Option - Identities []cosign.Identity -} - -// Options is a function that configures the options applied to a Verifier. -type Options func(opts *options) - -// WithPublicKey sets the public key. -func WithPublicKey(publicKey []byte) Options { - return func(opts *options) { - opts.PublicKey = publicKey - } -} - -// WithRemoteOptions is a functional option for overriding the default -// remote options used by the verifier. 
-func WithRemoteOptions(opts ...remote.Option) Options { - return func(o *options) { - o.ROpt = opts - } -} - -// WithIdentities specifies the identity matchers that have to be met -// for the signature to be deemed valid. -func WithIdentities(identities []cosign.Identity) Options { - return func(opts *options) { - opts.Identities = identities - } -} - -// CosignVerifier is a struct which is responsible for executing verification logic. -type CosignVerifier struct { - opts *cosign.CheckOpts -} - -// NewCosignVerifier initializes a new CosignVerifier. -func NewCosignVerifier(ctx context.Context, opts ...Options) (*CosignVerifier, error) { - o := options{} - for _, opt := range opts { - opt(&o) - } - - checkOpts := &cosign.CheckOpts{} - - ro := coptions.RegistryOptions{} - co, err := ro.ClientOpts(ctx) - if err != nil { - return nil, err - } - - checkOpts.Identities = o.Identities - if o.ROpt != nil { - co = append(co, ociremote.WithRemoteOptions(o.ROpt...)) - } - - checkOpts.RegistryClientOpts = co - - // If a public key is provided, it will use it to verify the signature. - // If there is no public key provided, it will try keyless verification. - // https://github.com/sigstore/cosign/blob/main/KEYLESS.md. - if len(o.PublicKey) > 0 { - checkOpts.Offline = true - // TODO(hidde): this is an oversight in our implementation. As it is - // theoretically possible to have a custom PK, without disabling tlog. 
- checkOpts.IgnoreTlog = true - - pubKeyRaw, err := cryptoutils.UnmarshalPEMToPublicKey(o.PublicKey) - if err != nil { - return nil, err - } - - checkOpts.SigVerifier, err = signature.LoadVerifier(pubKeyRaw, crypto.SHA256) - if err != nil { - return nil, err - } - } else { - checkOpts.RekorClient, err = rekor.NewClient(coptions.DefaultRekorURL) - if err != nil { - return nil, fmt.Errorf("unable to create Rekor client: %w", err) - } - - // This performs an online fetch of the Rekor public keys, but this is needed - // for verifying tlog entries (both online and offline). - // TODO(hidde): above note is important to keep in mind when we implement - // "offline" tlog above. - if checkOpts.RekorPubKeys, err = cosign.GetRekorPubs(ctx); err != nil { - return nil, fmt.Errorf("unable to get Rekor public keys: %w", err) - } - - checkOpts.CTLogPubKeys, err = cosign.GetCTLogPubs(ctx) - if err != nil { - return nil, fmt.Errorf("unable to get CTLog public keys: %w", err) - } - - if checkOpts.RootCerts, err = fulcio.GetRoots(); err != nil { - return nil, fmt.Errorf("unable to get Fulcio root certs: %w", err) - } - - if checkOpts.IntermediateCerts, err = fulcio.GetIntermediates(); err != nil { - return nil, fmt.Errorf("unable to get Fulcio intermediate certs: %w", err) - } - } - - return &CosignVerifier{ - opts: checkOpts, - }, nil -} - -// VerifyImageSignatures verify the authenticity of the given ref OCI image. -func (v *CosignVerifier) VerifyImageSignatures(ctx context.Context, ref name.Reference) ([]oci.Signature, bool, error) { - return cosign.VerifyImageSignatures(ctx, ref, v.opts) -} - -// Verify verifies the authenticity of the given ref OCI image. -// It returns a boolean indicating if the verification was successful. -// It returns an error if the verification fails, nil otherwise. 
-func (v *CosignVerifier) Verify(ctx context.Context, ref name.Reference) (bool, error) { - signatures, _, err := v.VerifyImageSignatures(ctx, ref) - if err != nil { - return false, err - } - - if len(signatures) == 0 { - return false, nil - } - - return true, nil + Verify(ctx context.Context, ref name.Reference) (VerificationResult, error) } diff --git a/internal/predicates/helmrepository_type_predicate.go b/internal/predicates/helmrepository_type_predicate.go index cc7c8fc7e..714d77942 100644 --- a/internal/predicates/helmrepository_type_predicate.go +++ b/internal/predicates/helmrepository_type_predicate.go @@ -22,7 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) // HelmRepositoryOCIMigrationPredicate implements predicate functions to allow diff --git a/internal/predicates/helmrepository_type_predicate_test.go b/internal/predicates/helmrepository_type_predicate_test.go index 0d3489d1f..e98728413 100644 --- a/internal/predicates/helmrepository_type_predicate_test.go +++ b/internal/predicates/helmrepository_type_predicate_test.go @@ -25,8 +25,7 @@ import ( "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/runtime/conditions" - v1 "github.com/fluxcd/source-controller/api/v1" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) func TestHelmRepositoryOCIMigrationPredicate_Create(t *testing.T) { @@ -161,7 +160,7 @@ func TestHelmRepositoryOCIMigrationPredicate_Update(t *testing.T) { Type: sourcev1.HelmRepositoryTypeDefault, } oldObj.Status = sourcev1.HelmRepositoryStatus{ - Artifact: &v1.Artifact{}, + Artifact: &meta.Artifact{}, URL: "http://some-address", ObservedGeneration: 3, } diff --git a/internal/reconcile/reconcile.go 
b/internal/reconcile/reconcile.go index 58a160b8b..27c931168 100644 --- a/internal/reconcile/reconcile.go +++ b/internal/reconcile/reconcile.go @@ -137,7 +137,7 @@ func ComputeReconcileResult(obj conditions.Setter, res Result, recErr error, rb switch t := recErr.(type) { case *serror.Stalling: if res == ResultEmpty { - conditions.MarkStalled(obj, t.Reason, t.Error()) + conditions.MarkStalled(obj, t.Reason, "%s", t.Error()) // The current generation has been reconciled successfully and it // has resulted in a stalled state. Return no error to stop further // requeuing. diff --git a/internal/reconcile/reconcile_test.go b/internal/reconcile/reconcile_test.go index 15a60b0d4..e22f370b5 100644 --- a/internal/reconcile/reconcile_test.go +++ b/internal/reconcile/reconcile_test.go @@ -29,7 +29,7 @@ import ( "github.com/fluxcd/pkg/runtime/conditions" "github.com/fluxcd/pkg/runtime/patch" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" serror "github.com/fluxcd/source-controller/internal/error" ) diff --git a/internal/reconcile/summarize/processor_test.go b/internal/reconcile/summarize/processor_test.go index dc6765d83..44f68b5bf 100644 --- a/internal/reconcile/summarize/processor_test.go +++ b/internal/reconcile/summarize/processor_test.go @@ -26,7 +26,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/fluxcd/pkg/apis/meta" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" "github.com/fluxcd/source-controller/internal/object" "github.com/fluxcd/source-controller/internal/reconcile" ) @@ -64,6 +65,43 @@ func TestRecordReconcileReq(t *testing.T) { t.Expect(obj).To(HaveStatusLastHandledReconcileAt("now")) }, }, + { + name: "empty reconcile annotation value", + beforeFunc: func(obj 
client.Object) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("")) + }, + }, + { + name: "whitespace-only reconcile annotation value", + beforeFunc: func(obj client.Object) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: " ", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt(" ")) + }, + }, + { + name: "reconcile annotation overwrites existing status value", + beforeFunc: func(obj client.Object) { + object.SetStatusLastHandledReconcileAt(obj, "old-value") + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "new-value", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("new-value")) + }, + }, } for _, tt := range tests { diff --git a/internal/reconcile/summarize/summary.go b/internal/reconcile/summarize/summary.go index 3977ccdfb..8650a0907 100644 --- a/internal/reconcile/summarize/summary.go +++ b/internal/reconcile/summarize/summary.go @@ -234,7 +234,7 @@ func (h *Helper) SummarizeAndPatch(ctx context.Context, obj conditions.Setter, o } if len(failedBiPolarity) > 0 { topFailedBiPolarity := conditions.Get(obj, failedBiPolarity[0]) - conditions.MarkFalse(obj, meta.ReadyCondition, topFailedBiPolarity.Reason, topFailedBiPolarity.Message) + conditions.MarkFalse(obj, meta.ReadyCondition, topFailedBiPolarity.Reason, "%s", topFailedBiPolarity.Message) } // If object is not stalled, result is success and runtime error is nil, diff --git a/internal/tls/config.go b/internal/tls/config.go deleted file mode 100644 index 841c9538e..000000000 --- a/internal/tls/config.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright 2023 The Flux authors - -Licensed under the Apache License, 
Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package tls - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - neturl "net/url" - - corev1 "k8s.io/api/core/v1" -) - -const CACrtKey = "ca.crt" - -// TLSBytes contains the bytes of the TLS files. -type TLSBytes struct { - // CertBytes is the bytes of the certificate file. - CertBytes []byte - // KeyBytes is the bytes of the key file. - KeyBytes []byte - // CABytes is the bytes of the CA file. - CABytes []byte -} - -// KubeTLSClientConfigFromSecret returns a TLS client config as a `tls.Config` -// object and in its bytes representation. The secret is expected to have the -// following keys: -// - tls.key, for the private key -// - tls.crt, for the certificate -// - ca.crt, for the CA certificate -// -// Secrets with no certificate, private key, AND CA cert are ignored. If only a -// certificate OR private key is found, an error is returned. The Secret type -// can be blank, Opaque or kubernetes.io/tls. -func KubeTLSClientConfigFromSecret(secret corev1.Secret, url string) (*tls.Config, *TLSBytes, error) { - return tlsClientConfigFromSecret(secret, url, true, true) -} - -// TLSClientConfigFromSecret returns a TLS client config as a `tls.Config` -// object and in its bytes representation. The secret is expected to have the -// following keys: -// - keyFile, for the private key -// - certFile, for the certificate -// - caFile, for the CA certificate -// -// Secrets with no certificate, private key, AND CA cert are ignored. 
If only a -// certificate OR private key is found, an error is returned. The Secret type -// can be blank, Opaque or kubernetes.io/tls. -func TLSClientConfigFromSecret(secret corev1.Secret, url string) (*tls.Config, *TLSBytes, error) { - return tlsClientConfigFromSecret(secret, url, false, true) -} - -// LegacyTLSClientConfigFromSecret returns a TLS client config as a `tls.Config` -// object and in its bytes representation. The secret is expected to have the -// following keys: -// - keyFile, for the private key -// - certFile, for the certificate -// - caFile, for the CA certificate -// -// Secrets with no certificate, private key, AND CA cert are ignored. If only a -// certificate OR private key is found, an error is returned. -func LegacyTLSClientConfigFromSecret(secret corev1.Secret, url string) (*tls.Config, *TLSBytes, error) { - return tlsClientConfigFromSecret(secret, url, false, false) -} - -// tlsClientConfigFromSecret attempts to construct and return a TLS client -// config from the given Secret. If the Secret does not contain any TLS -// data, it returns nil. -// -// kubernetesTLSKeys is a boolean indicating whether to check the Secret -// for keys expected to be present in a Kubernetes TLS Secret. Based on its -// value, the Secret is checked for the following keys: -// - tls.key/keyFile for the private key -// - tls.crt/certFile for the certificate -// - ca.crt/caFile for the CA certificate -// The keys should adhere to a single convention, i.e. a Secret with tls.key -// and certFile is invalid. -// -// checkType is a boolean indicating whether to check the Secret type. If true -// and the Secret's type is not blank, Opaque or kubernetes.io/tls, then an -// error is returned. -func tlsClientConfigFromSecret(secret corev1.Secret, url string, kubernetesTLSKeys bool, checkType bool) (*tls.Config, *TLSBytes, error) { - if checkType { - // Only Secrets of type Opaque and TLS are allowed. 
We also allow Secrets with a blank - // type, to avoid having to specify the type of the Secret for every test case. - // Since a real Kubernetes Secret is of type Opaque by default, its safe to allow this. - switch secret.Type { - case corev1.SecretTypeOpaque, corev1.SecretTypeTLS, "": - default: - return nil, nil, fmt.Errorf("cannot use secret '%s' to construct TLS config: invalid secret type: '%s'", secret.Name, secret.Type) - } - } - - var certBytes, keyBytes, caBytes []byte - if kubernetesTLSKeys { - certBytes, keyBytes, caBytes = secret.Data[corev1.TLSCertKey], secret.Data[corev1.TLSPrivateKeyKey], secret.Data[CACrtKey] - } else { - certBytes, keyBytes, caBytes = secret.Data["certFile"], secret.Data["keyFile"], secret.Data["caFile"] - } - - switch { - case len(certBytes)+len(keyBytes)+len(caBytes) == 0: - return nil, nil, nil - case (len(certBytes) > 0 && len(keyBytes) == 0) || (len(keyBytes) > 0 && len(certBytes) == 0): - return nil, nil, fmt.Errorf("invalid '%s' secret data: both certificate and private key need to be provided", - secret.Name) - } - - tlsConf := &tls.Config{ - MinVersion: tls.VersionTLS12, - } - if len(certBytes) > 0 && len(keyBytes) > 0 { - cert, err := tls.X509KeyPair(certBytes, keyBytes) - if err != nil { - return nil, nil, err - } - tlsConf.Certificates = append(tlsConf.Certificates, cert) - } - - if len(caBytes) > 0 { - cp, err := x509.SystemCertPool() - if err != nil { - return nil, nil, fmt.Errorf("cannot retrieve system certificate pool: %w", err) - } - if !cp.AppendCertsFromPEM(caBytes) { - return nil, nil, fmt.Errorf("cannot append certificate into certificate pool: invalid CA certificate") - } - - tlsConf.RootCAs = cp - } - - if url != "" { - u, err := neturl.Parse(url) - if err != nil { - return nil, nil, fmt.Errorf("cannot parse repository URL: %w", err) - } - - tlsConf.ServerName = u.Hostname() - } - - return tlsConf, &TLSBytes{ - CertBytes: certBytes, - KeyBytes: keyBytes, - CABytes: caBytes, - }, nil -} diff --git 
a/internal/tls/config_test.go b/internal/tls/config_test.go deleted file mode 100644 index 949142a07..000000000 --- a/internal/tls/config_test.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright 2023 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package tls - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "fmt" - "math/big" - "net/url" - "testing" - - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" -) - -func Test_tlsClientConfigFromSecret(t *testing.T) { - kubernetesTlsSecretFixture := validTlsSecret(t, true) - tlsSecretFixture := validTlsSecret(t, false) - - tests := []struct { - name string - secret corev1.Secret - modify func(secret *corev1.Secret) - tlsKeys bool - checkType bool - url string - wantErr bool - wantNil bool - }{ - { - name: "tls.crt, tls.key and ca.crt", - secret: kubernetesTlsSecretFixture, - modify: nil, - tlsKeys: true, - url: "https://example.com", - }, - { - name: "certFile, keyFile and caFile", - secret: tlsSecretFixture, - modify: nil, - tlsKeys: false, - url: "https://example.com", - }, - { - name: "without tls.crt", - secret: kubernetesTlsSecretFixture, - modify: func(s *corev1.Secret) { delete(s.Data, "tls.crt") }, - tlsKeys: true, - wantErr: true, - wantNil: true, - }, - { - name: "without tls.key", - secret: kubernetesTlsSecretFixture, - modify: func(s *corev1.Secret) { delete(s.Data, "tls.key") }, - tlsKeys: true, - wantErr: true, - wantNil: true, - }, - { - name: "without ca.crt", 
- secret: kubernetesTlsSecretFixture, - modify: func(s *corev1.Secret) { delete(s.Data, "ca.crt") }, - tlsKeys: true, - }, - { - name: "empty secret", - secret: corev1.Secret{}, - tlsKeys: true, - wantNil: true, - }, - { - name: "docker config secret with type checking enabled", - secret: tlsSecretFixture, - modify: func(secret *corev1.Secret) { secret.Type = corev1.SecretTypeDockerConfigJson }, - tlsKeys: false, - checkType: true, - wantErr: true, - wantNil: true, - }, - { - name: "docker config secret with type checking disabled", - secret: tlsSecretFixture, - modify: func(secret *corev1.Secret) { secret.Type = corev1.SecretTypeDockerConfigJson }, - tlsKeys: false, - url: "https://example.com", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - secret := tt.secret.DeepCopy() - if tt.modify != nil { - tt.modify(secret) - } - - tlsConfig, _, err := tlsClientConfigFromSecret(*secret, tt.url, tt.tlsKeys, tt.checkType) - g.Expect(err != nil).To(Equal(tt.wantErr), fmt.Sprintf("expected error: %v, got: %v", tt.wantErr, err)) - g.Expect(tlsConfig == nil).To(Equal(tt.wantNil)) - if tt.url != "" { - u, _ := url.Parse(tt.url) - g.Expect(u.Hostname()).To(Equal(tlsConfig.ServerName)) - } - }) - } -} - -// validTlsSecret creates a secret containing key pair and CA certificate that are -// valid from a syntax (minimum requirements) perspective. 
-func validTlsSecret(t *testing.T, kubernetesTlsKeys bool) corev1.Secret { - t.Helper() - key, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Fatal("Private key cannot be created.", err.Error()) - } - - certTemplate := x509.Certificate{ - SerialNumber: big.NewInt(1337), - } - cert, err := x509.CreateCertificate(rand.Reader, &certTemplate, &certTemplate, &key.PublicKey, key) - if err != nil { - t.Fatal("Certificate cannot be created.", err.Error()) - } - - ca := &x509.Certificate{ - SerialNumber: big.NewInt(7331), - IsCA: true, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - } - - caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096) - if err != nil { - t.Fatal("CA private key cannot be created.", err.Error()) - } - - caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caPrivKey.PublicKey, caPrivKey) - if err != nil { - t.Fatal("CA certificate cannot be created.", err.Error()) - } - - keyPem := pem.EncodeToMemory(&pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(key), - }) - - certPem := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: cert, - }) - - caPem := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: caBytes, - }) - - crtKey := corev1.TLSCertKey - pkKey := corev1.TLSPrivateKeyKey - caKey := CACrtKey - if !kubernetesTlsKeys { - crtKey = "certFile" - pkKey = "keyFile" - caKey = "caFile" - } - return corev1.Secret{ - Data: map[string][]byte{ - crtKey: []byte(certPem), - pkKey: []byte(keyPem), - caKey: []byte(caPem), - }, - } -} diff --git a/internal/transport/transport.go b/internal/transport/transport.go deleted file mode 100644 index 89286df71..000000000 --- a/internal/transport/transport.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance 
with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package transport - -import ( - "crypto/tls" - "fmt" - "net" - "net/http" - "sync" - "time" -) - -// TransportPool is a progressive and non-blocking pool -// for http.Transport objects, optimised for Gargabe Collection -// and without a hard limit on number of objects created. -// -// Its main purpose is to enable for transport objects to be -// used across helm chart download requests and helm/pkg/getter -// instances by leveraging the getter.WithTransport(t) construct. -// -// The use of this pool improves the default behaviour of helm getter -// which creates a new connection per request, or per getter instance, -// resulting on unnecessary TCP connections with the target. -// -// http.Transport objects may contain sensitive material and also have -// settings that may impact the security of HTTP operations using -// them (i.e. InsecureSkipVerify). Therefore, ensure that they are -// used in a thread-safe way, and also by reseting TLS specific state -// after each use. -// -// Calling the Release(t) function will reset TLS specific state whilst -// also releasing the transport back to the pool to be reused. 
-// -// xref: https://github.com/helm/helm/pull/10568 -// xref2: https://github.com/fluxcd/source-controller/issues/578 -type TransportPool struct { -} - -var pool = &sync.Pool{ - New: func() interface{} { - return &http.Transport{ - DisableCompression: true, - Proxy: http.ProxyFromEnvironment, - - // Due to the non blocking nature of this approach, - // at peak usage a higher number of transport objects - // may be created. sync.Pool will ensure they are - // gargage collected when/if needed. - // - // By setting a low value to IdleConnTimeout the connections - // will be closed after that period of inactivity, allowing the - // transport to be garbage collected. - IdleConnTimeout: 60 * time.Second, - - // use safe defaults based off http.DefaultTransport - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } - }, -} - -// NewOrIdle tries to return an existing transport that is not currently being used. -// If none is found, creates a new Transport instead. -// -// tlsConfig can optionally set the TLSClientConfig for the transport. -func NewOrIdle(tlsConfig *tls.Config) *http.Transport { - t := pool.Get().(*http.Transport) - t.TLSClientConfig = tlsConfig - - return t -} - -// Release releases the transport back to the TransportPool after -// sanitising its sensitive fields. 
-func Release(transport *http.Transport) error { - if transport == nil { - return fmt.Errorf("cannot release nil transport") - } - - transport.TLSClientConfig = nil - - pool.Put(transport) - return nil -} diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go deleted file mode 100644 index f0bc387d6..000000000 --- a/internal/transport/transport_test.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package transport - -import ( - "crypto/tls" - "testing" -) - -func Test_TransportReuse(t *testing.T) { - t1 := NewOrIdle(nil) - t2 := NewOrIdle(nil) - - if t1 == t2 { - t.Errorf("same transported returned twice") - } - - err := Release(t2) - if err != nil { - t.Errorf("error releasing transport t2: %v", err) - } - - t3 := NewOrIdle(&tls.Config{ - ServerName: "testing", - }) - if t3.TLSClientConfig == nil || t3.TLSClientConfig.ServerName != "testing" { - t.Errorf("TLSClientConfig not properly configured") - } - - err = Release(t3) - if err != nil { - t.Errorf("error releasing transport t3: %v", err) - } - if t3.TLSClientConfig != nil { - t.Errorf("TLSClientConfig not cleared after release") - } - - err = Release(nil) - if err == nil { - t.Errorf("should not allow release nil transport") - } else if err.Error() != "cannot release nil transport" { - t.Errorf("wanted error message: 'cannot release nil transport' got: %q", err.Error()) - } -} diff --git a/main.go b/main.go index 
e2bd08efb..cb019e6e4 100644 --- a/main.go +++ b/main.go @@ -18,8 +18,6 @@ package main import ( "fmt" - "net" - "net/http" "os" "time" @@ -36,8 +34,15 @@ import ( ctrlcache "sigs.k8s.io/controller-runtime/pkg/cache" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ctrlcfg "sigs.k8s.io/controller-runtime/pkg/config" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + artcfg "github.com/fluxcd/pkg/artifact/config" + artdigest "github.com/fluxcd/pkg/artifact/digest" + artsrv "github.com/fluxcd/pkg/artifact/server" + artstore "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + pkgcache "github.com/fluxcd/pkg/cache" "github.com/fluxcd/pkg/git" "github.com/fluxcd/pkg/runtime/client" helper "github.com/fluxcd/pkg/runtime/controller" @@ -50,14 +55,12 @@ import ( "github.com/fluxcd/pkg/runtime/pprof" "github.com/fluxcd/pkg/runtime/probes" - v1 "github.com/fluxcd/source-controller/api/v1" - "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" // +kubebuilder:scaffold:imports "github.com/fluxcd/source-controller/internal/cache" "github.com/fluxcd/source-controller/internal/controller" - intdigest "github.com/fluxcd/source-controller/internal/digest" "github.com/fluxcd/source-controller/internal/features" "github.com/fluxcd/source-controller/internal/helm" "github.com/fluxcd/source-controller/internal/helm/registry" @@ -83,37 +86,37 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(v1beta2.AddToScheme(scheme)) - utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(sourcev1.AddToScheme(scheme)) // 
+kubebuilder:scaffold:scheme } func main() { + const ( + tokenCacheDefaultMaxSize = 100 + ) + var ( - metricsAddr string - eventsAddr string - healthAddr string - storagePath string - storageAddr string - storageAdvAddr string - concurrent int - requeueDependency time.Duration - helmIndexLimit int64 - helmChartLimit int64 - helmChartFileLimit int64 - clientOptions client.Options - logOptions logger.Options - leaderElectionOptions leaderelection.Options - rateLimiterOptions helper.RateLimiterOptions - featureGates feathelper.FeatureGates - watchOptions helper.WatchOptions - intervalJitterOptions jitter.IntervalOptions - helmCacheMaxSize int - helmCacheTTL string - helmCachePurgeInterval string - artifactRetentionTTL time.Duration - artifactRetentionRecords int - artifactDigestAlgo string + metricsAddr string + eventsAddr string + healthAddr string + concurrent int + requeueDependency time.Duration + helmIndexLimit int64 + helmChartLimit int64 + helmChartFileLimit int64 + artifactOptions artcfg.Options + clientOptions client.Options + logOptions logger.Options + leaderElectionOptions leaderelection.Options + rateLimiterOptions helper.RateLimiterOptions + featureGates feathelper.FeatureGates + watchOptions helper.WatchOptions + intervalJitterOptions jitter.IntervalOptions + helmCacheMaxSize int + helmCacheTTL string + helmCachePurgeInterval string + tokenCacheOptions pkgcache.TokenFlags + defaultServiceAccount string ) flag.StringVar(&metricsAddr, "metrics-addr", envOrDefault("METRICS_ADDR", ":8080"), @@ -121,12 +124,6 @@ func main() { flag.StringVar(&eventsAddr, "events-addr", envOrDefault("EVENTS_ADDR", ""), "The address of the events receiver.") flag.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.") - flag.StringVar(&storagePath, "storage-path", envOrDefault("STORAGE_PATH", ""), - "The local storage path.") - flag.StringVar(&storageAddr, "storage-addr", envOrDefault("STORAGE_ADDR", ":9090"), - "The address the static file 
server binds to.") - flag.StringVar(&storageAdvAddr, "storage-adv-addr", envOrDefault("STORAGE_ADV_ADDR", ""), - "The advertised address of the static file server.") flag.IntVar(&concurrent, "concurrent", 2, "The number of concurrent reconciles per controller.") flag.Int64Var(&helmIndexLimit, "helm-index-max-size", helm.MaxIndexSize, "The max allowed size in bytes of a Helm repository index file.") @@ -146,13 +143,10 @@ func main() { "The list of key exchange algorithms to use for ssh connections, arranged from most preferred to the least.") flag.StringSliceVar(&git.HostKeyAlgos, "ssh-hostkey-algos", []string{}, "The list of hostkey algorithms to use for ssh connections, arranged from most preferred to the least.") - flag.DurationVar(&artifactRetentionTTL, "artifact-retention-ttl", 60*time.Second, - "The duration of time that artifacts from previous reconciliations will be kept in storage before being garbage collected.") - flag.IntVar(&artifactRetentionRecords, "artifact-retention-records", 2, - "The maximum number of artifacts to be kept in storage after a garbage collection.") - flag.StringVar(&artifactDigestAlgo, "artifact-digest-algo", intdigest.Canonical.String(), - "The algorithm to use to calculate the digest of artifacts.") + flag.StringVar(&defaultServiceAccount, auth.ControllerFlagDefaultServiceAccount, + "", "Default service account to use for workload identity when not specified in resources.") + artifactOptions.BindFlags(flag.CommandLine) clientOptions.BindFlags(flag.CommandLine) logOptions.BindFlags(flag.CommandLine) leaderElectionOptions.BindFlags(flag.CommandLine) @@ -160,16 +154,34 @@ func main() { featureGates.BindFlags(flag.CommandLine) watchOptions.BindFlags(flag.CommandLine) intervalJitterOptions.BindFlags(flag.CommandLine) + tokenCacheOptions.BindFlags(flag.CommandLine, tokenCacheDefaultMaxSize) flag.Parse() logger.SetLogger(logger.NewLogger(logOptions)) + if defaultServiceAccount != "" { + auth.SetDefaultServiceAccount(defaultServiceAccount) 
+ } + if err := featureGates.WithLogger(setupLog).SupportedFeatures(features.FeatureGates()); err != nil { setupLog.Error(err, "unable to load feature gates") os.Exit(1) } + switch enabled, err := features.Enabled(auth.FeatureGateObjectLevelWorkloadIdentity); { + case err != nil: + setupLog.Error(err, "unable to check feature gate "+auth.FeatureGateObjectLevelWorkloadIdentity) + os.Exit(1) + case enabled: + auth.EnableObjectLevelWorkloadIdentity() + } + + if auth.InconsistentObjectLevelConfiguration() { + setupLog.Error(auth.ErrInconsistentObjectLevelConfiguration, "invalid configuration") + os.Exit(1) + } + if err := intervalJitterOptions.SetGlobalJitter(nil); err != nil { setupLog.Error(err, "unable to set global jitter") os.Exit(1) @@ -179,14 +191,39 @@ func main() { probes.SetupChecks(mgr, setupLog) - metrics := helper.NewMetrics(mgr, metrics.MustMakeRecorder(), v1.SourceFinalizer) + metrics := helper.NewMetrics(mgr, metrics.MustMakeRecorder(), sourcev1.SourceFinalizer) cacheRecorder := cache.MustMakeMetrics() eventRecorder := mustSetupEventRecorder(mgr, eventsAddr, controllerName) - storage := mustInitStorage(storagePath, storageAdvAddr, artifactRetentionTTL, artifactRetentionRecords, artifactDigestAlgo) + + algo, err := artdigest.AlgorithmForName(artifactOptions.ArtifactDigestAlgo) + if err != nil { + setupLog.Error(err, "unable to configure canonical digest algorithm") + os.Exit(1) + } + artdigest.Canonical = algo + + storage, err := artstore.New(&artifactOptions) + if err != nil { + setupLog.Error(err, "unable to configure artifact storage") + os.Exit(1) + } mustSetupHelmLimits(helmIndexLimit, helmChartLimit, helmChartFileLimit) helmIndexCache, helmIndexCacheItemTTL := mustInitHelmCache(helmCacheMaxSize, helmCacheTTL, helmCachePurgeInterval) + var tokenCache *pkgcache.TokenCache + if tokenCacheOptions.MaxSize > 0 { + var err error + tokenCache, err = pkgcache.NewTokenCache(tokenCacheOptions.MaxSize, + pkgcache.WithMaxDuration(tokenCacheOptions.MaxDuration), 
+ pkgcache.WithMetricsRegisterer(ctrlmetrics.Registry), + pkgcache.WithMetricsPrefix("gotk_token_")) + if err != nil { + setupLog.Error(err, "unable to create token cache") + os.Exit(1) + } + } + ctx := ctrl.SetupSignalHandler() if err := (&controller.GitRepositoryReconciler{ @@ -195,11 +232,12 @@ func main() { Metrics: metrics, Storage: storage, ControllerName: controllerName, + TokenCache: tokenCache, }).SetupWithManagerAndOptions(mgr, controller.GitRepositoryReconcilerOptions{ DependencyRequeueInterval: requeueDependency, RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", v1beta2.GitRepositoryKind) + setupLog.Error(err, "unable to create controller", "controller", sourcev1.GitRepositoryKind) os.Exit(1) } @@ -216,7 +254,7 @@ func main() { }).SetupWithManagerAndOptions(mgr, controller.HelmRepositoryReconcilerOptions{ RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", v1beta2.HelmRepositoryKind) + setupLog.Error(err, "unable to create controller", "controller", sourcev1.HelmRepositoryKind) os.Exit(1) } @@ -234,7 +272,7 @@ func main() { }).SetupWithManagerAndOptions(ctx, mgr, controller.HelmChartReconcilerOptions{ RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", v1beta2.HelmChartKind) + setupLog.Error(err, "unable to create controller", "controller", sourcev1.HelmChartKind) os.Exit(1) } @@ -244,10 +282,11 @@ func main() { Metrics: metrics, Storage: storage, ControllerName: controllerName, + TokenCache: tokenCache, }).SetupWithManagerAndOptions(mgr, controller.BucketReconcilerOptions{ RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Bucket") + setupLog.Error(err, "unable to create controller", "controller", 
sourcev1.BucketKind) os.Exit(1) } @@ -256,11 +295,12 @@ func main() { Storage: storage, EventRecorder: eventRecorder, ControllerName: controllerName, + TokenCache: tokenCache, Metrics: metrics, }).SetupWithManagerAndOptions(mgr, controller.OCIRepositoryReconcilerOptions{ RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "OCIRepository") + setupLog.Error(err, "unable to create controller", "controller", sourcev1.OCIRepositoryKind) os.Exit(1) } // +kubebuilder:scaffold:builder @@ -271,7 +311,11 @@ func main() { // to handle that. <-mgr.Elected() - startFileServer(storage.BasePath, storageAddr) + // Start the artifact server if running as leader. + if err := artsrv.Start(ctx, &artifactOptions); err != nil { + setupLog.Error(err, "artifact server error") + os.Exit(1) + } }() setupLog.Info("starting manager") @@ -281,17 +325,6 @@ func main() { } } -func startFileServer(path string, address string) { - setupLog.Info("starting file server") - fs := http.FileServer(http.Dir(path)) - mux := http.NewServeMux() - mux.Handle("/", fs) - err := http.ListenAndServe(address, mux) - if err != nil { - setupLog.Error(err, "file server error") - } -} - func mustSetupEventRecorder(mgr ctrl.Manager, eventsAddr, controllerName string) record.EventRecorder { eventRecorder, err := events.NewRecorder(mgr, ctrl.Log, eventsAddr, controllerName) if err != nil { @@ -348,11 +381,11 @@ func mustSetupManager(metricsAddr, healthAddr string, maxConcurrent int, }, Cache: ctrlcache.Options{ ByObject: map[ctrlclient.Object]ctrlcache.ByObject{ - &v1.GitRepository{}: {Label: watchSelector}, - &v1beta2.HelmRepository{}: {Label: watchSelector}, - &v1beta2.HelmChart{}: {Label: watchSelector}, - &v1beta2.Bucket{}: {Label: watchSelector}, - &v1beta2.OCIRepository{}: {Label: watchSelector}, + &sourcev1.GitRepository{}: {Label: watchSelector}, + &sourcev1.HelmRepository{}: {Label: watchSelector}, + &sourcev1.HelmChart{}: 
{Label: watchSelector}, + &sourcev1.Bucket{}: {Label: watchSelector}, + &sourcev1.OCIRepository{}: {Label: watchSelector}, }, }, Metrics: metricsserver.Options{ @@ -406,51 +439,6 @@ func mustInitHelmCache(maxSize int, itemTTL, purgeInterval string) (*cache.Cache return cache.New(maxSize, interval), ttl } -func mustInitStorage(path string, storageAdvAddr string, artifactRetentionTTL time.Duration, artifactRetentionRecords int, artifactDigestAlgo string) *controller.Storage { - if storageAdvAddr == "" { - storageAdvAddr = determineAdvStorageAddr(storageAdvAddr) - } - - if artifactDigestAlgo != intdigest.Canonical.String() { - algo, err := intdigest.AlgorithmForName(artifactDigestAlgo) - if err != nil { - setupLog.Error(err, "unable to configure canonical digest algorithm") - os.Exit(1) - } - intdigest.Canonical = algo - } - - storage, err := controller.NewStorage(path, storageAdvAddr, artifactRetentionTTL, artifactRetentionRecords) - if err != nil { - setupLog.Error(err, "unable to initialise storage") - os.Exit(1) - } - return storage -} - -func determineAdvStorageAddr(storageAddr string) string { - host, port, err := net.SplitHostPort(storageAddr) - if err != nil { - setupLog.Error(err, "unable to parse storage address") - os.Exit(1) - } - switch host { - case "": - host = "localhost" - case "0.0.0.0": - host = os.Getenv("HOSTNAME") - if host == "" { - hn, err := os.Hostname() - if err != nil { - setupLog.Error(err, "0.0.0.0 specified in storage addr but hostname is invalid") - os.Exit(1) - } - host = hn - } - } - return net.JoinHostPort(host, port) -} - func envOrDefault(envName, defaultValue string) string { ret := os.Getenv(envName) if ret != "" { diff --git a/pkg/minio/minio.go b/pkg/minio/minio.go deleted file mode 100644 index 7343f753e..000000000 --- a/pkg/minio/minio.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with 
the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package minio - -import ( - "context" - "errors" - "fmt" - - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/minio/minio-go/v7/pkg/s3utils" - corev1 "k8s.io/api/core/v1" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" -) - -// MinioClient is a minimal Minio client for fetching files from S3 compatible -// storage APIs. -type MinioClient struct { - *minio.Client -} - -// NewClient creates a new Minio storage client. -func NewClient(bucket *sourcev1.Bucket, secret *corev1.Secret) (*MinioClient, error) { - opt := minio.Options{ - Region: bucket.Spec.Region, - Secure: !bucket.Spec.Insecure, - // About BucketLookup, it should be noted that not all S3 providers support - // path-type access (e.g., Ali OSS). Hence, we revert to using the default - // auto access, which we believe can cover most use cases. - } - - if secret != nil { - var accessKey, secretKey string - if k, ok := secret.Data["accesskey"]; ok { - accessKey = string(k) - } - if k, ok := secret.Data["secretkey"]; ok { - secretKey = string(k) - } - if accessKey != "" && secretKey != "" { - opt.Creds = credentials.NewStaticV4(accessKey, secretKey, "") - } - } else if bucket.Spec.Provider == sourcev1.AmazonBucketProvider { - opt.Creds = credentials.NewIAM("") - } - - client, err := minio.New(bucket.Spec.Endpoint, &opt) - if err != nil { - return nil, err - } - return &MinioClient{Client: client}, nil -} - -// ValidateSecret validates the credential secret. 
The provided Secret may -// be nil. -func ValidateSecret(secret *corev1.Secret) error { - if secret == nil { - return nil - } - err := fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) - if _, ok := secret.Data["accesskey"]; !ok { - return err - } - if _, ok := secret.Data["secretkey"]; !ok { - return err - } - return nil -} - -// FGetObject gets the object from the provided object storage bucket, and -// writes it to targetPath. -// It returns the etag of the successfully fetched file, or any error. -func (c *MinioClient) FGetObject(ctx context.Context, bucketName, objectName, localPath string) (string, error) { - stat, err := c.Client.StatObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - return "", err - } - opts := minio.GetObjectOptions{} - if err = opts.SetMatchETag(stat.ETag); err != nil { - return "", err - } - if err = c.Client.FGetObject(ctx, bucketName, objectName, localPath, opts); err != nil { - return "", err - } - return stat.ETag, nil -} - -// VisitObjects iterates over the items in the provided object storage -// bucket, calling visit for every item. -// If the underlying client or the visit callback returns an error, -// it returns early. -func (c *MinioClient) VisitObjects(ctx context.Context, bucketName string, prefix string, visit func(key, etag string) error) error { - for object := range c.Client.ListObjects(ctx, bucketName, minio.ListObjectsOptions{ - Recursive: true, - Prefix: prefix, - UseV1: s3utils.IsGoogleEndpoint(*c.Client.EndpointURL()), - }) { - if object.Err != nil { - err := fmt.Errorf("listing objects from bucket '%s' failed: %w", bucketName, object.Err) - return err - } - - if err := visit(object.Key, object.ETag); err != nil { - return err - } - } - return nil -} - -// ObjectIsNotFound checks if the error provided is a minio.ErrResponse -// with "NoSuchKey" code. 
-func (c *MinioClient) ObjectIsNotFound(err error) bool { - if resp := new(minio.ErrorResponse); errors.As(err, resp) { - return resp.Code == "NoSuchKey" - } - return false -} - -// Close closes the Minio Client and logs any useful errors. -func (c *MinioClient) Close(_ context.Context) { - // Minio client does not provide a close method -} diff --git a/pkg/minio/minio_test.go b/pkg/minio/minio_test.go deleted file mode 100644 index 40eb3deee..000000000 --- a/pkg/minio/minio_test.go +++ /dev/null @@ -1,353 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package minio - -import ( - "context" - "fmt" - "log" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/google/uuid" - miniov7 "github.com/minio/minio-go/v7" - "github.com/ory/dockertest/v3" - "github.com/ory/dockertest/v3/docker" - "gotest.tools/assert" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/sourceignore" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" -) - -const ( - objectName string = "test.yaml" - objectEtag string = "2020beab5f1711919157756379622d1d" -) - -var ( - // testMinioVersion is the version (image tag) of the Minio server image - // used to test against. 
- testMinioVersion = "RELEASE.2022-12-12T19-27-27Z" - // testMinioRootUser is the root user of the Minio server. - testMinioRootUser = "fluxcd" - // testMinioRootPassword is the root password of the Minio server. - testMinioRootPassword = "passw0rd!" - // testVaultAddress is the address of the Minio server, it is set - // by TestMain after booting it. - testMinioAddress string - // testMinioClient is the Minio client used to test against, it is set - // by TestMain after booting the Minio server. - testMinioClient *MinioClient -) - -var ( - bucketName = "test-bucket-minio" + uuid.New().String() - prefix = "" - secret = corev1.Secret{ - ObjectMeta: v1.ObjectMeta{ - Name: "minio-secret", - Namespace: "default", - }, - Data: map[string][]byte{ - "accesskey": []byte(testMinioRootUser), - "secretkey": []byte(testMinioRootPassword), - }, - Type: "Opaque", - } - emptySecret = corev1.Secret{ - ObjectMeta: v1.ObjectMeta{ - Name: "minio-secret", - Namespace: "default", - }, - Data: map[string][]byte{}, - Type: "Opaque", - } - bucket = sourcev1.Bucket{ - ObjectMeta: v1.ObjectMeta{ - Name: "minio-test-bucket", - Namespace: "default", - }, - Spec: sourcev1.BucketSpec{ - BucketName: bucketName, - Provider: "generic", - SecretRef: &meta.LocalObjectReference{ - Name: secret.Name, - }, - }, - } - bucketAwsProvider = sourcev1.Bucket{ - ObjectMeta: v1.ObjectMeta{ - Name: "minio-test-bucket", - Namespace: "default", - }, - Spec: sourcev1.BucketSpec{ - BucketName: bucketName, - Provider: "aws", - }, - } -) - -func TestMain(m *testing.M) { - // Uses a sensible default on Windows (TCP/HTTP) and Linux/MacOS (socket) - pool, err := dockertest.NewPool("") - if err != nil { - log.Fatalf("could not connect to docker: %s", err) - } - - // Pull the image, create a container based on it, and run it - resource, err := pool.RunWithOptions(&dockertest.RunOptions{ - Repository: "minio/minio", - Tag: testMinioVersion, - ExposedPorts: []string{ - "9000/tcp", - "9001/tcp", - }, - Env: []string{ - 
"MINIO_ROOT_USER=" + testMinioRootUser, - "MINIO_ROOT_PASSWORD=" + testMinioRootPassword, - }, - Cmd: []string{"server", "/data", "--console-address", ":9001"}, - }, func(config *docker.HostConfig) { - config.AutoRemove = true - }) - if err != nil { - log.Fatalf("could not start resource: %s", err) - } - - purgeResource := func() { - if err := pool.Purge(resource); err != nil { - log.Printf("could not purge resource: %s", err) - } - } - - // Set the address of the Minio server used for testing. - testMinioAddress = fmt.Sprintf("127.0.0.1:%v", resource.GetPort("9000/tcp")) - - // Construct a Minio client using the address of the Minio server. - testMinioClient, err = NewClient(bucketStub(bucket, testMinioAddress), secret.DeepCopy()) - if err != nil { - log.Fatalf("cannot create Minio client: %s", err) - } - - // Wait until Minio is ready to serve requests... - if err := pool.Retry(func() error { - hCancel, err := testMinioClient.HealthCheck(1 * time.Second) - if err != nil { - log.Fatalf("cannot start Minio health check: %s", err) - } - defer hCancel() - - if !testMinioClient.IsOnline() { - return fmt.Errorf("client is offline: Minio is not ready") - } - return nil - }); err != nil { - purgeResource() - log.Fatalf("could not connect to docker: %s", err) - } - - ctx := context.Background() - createBucket(ctx) - addObjectToBucket(ctx) - run := m.Run() - removeObjectFromBucket(ctx) - deleteBucket(ctx) - purgeResource() - os.Exit(run) -} - -func TestNewClient(t *testing.T) { - minioClient, err := NewClient(bucketStub(bucket, testMinioAddress), secret.DeepCopy()) - assert.NilError(t, err) - assert.Assert(t, minioClient != nil) -} - -func TestNewClientEmptySecret(t *testing.T) { - minioClient, err := NewClient(bucketStub(bucket, testMinioAddress), emptySecret.DeepCopy()) - assert.NilError(t, err) - assert.Assert(t, minioClient != nil) -} - -func TestNewClientAwsProvider(t *testing.T) { - minioClient, err := NewClient(bucketStub(bucketAwsProvider, testMinioAddress), nil) - 
assert.NilError(t, err) - assert.Assert(t, minioClient != nil) -} - -func TestBucketExists(t *testing.T) { - ctx := context.Background() - exists, err := testMinioClient.BucketExists(ctx, bucketName) - assert.NilError(t, err) - assert.Assert(t, exists) -} - -func TestBucketNotExists(t *testing.T) { - ctx := context.Background() - exists, err := testMinioClient.BucketExists(ctx, "notexistsbucket") - assert.NilError(t, err) - assert.Assert(t, !exists) -} - -func TestFGetObject(t *testing.T) { - ctx := context.Background() - tempDir := t.TempDir() - path := filepath.Join(tempDir, sourceignore.IgnoreFile) - _, err := testMinioClient.FGetObject(ctx, bucketName, objectName, path) - assert.NilError(t, err) -} - -func TestFGetObjectNotExists(t *testing.T) { - ctx := context.Background() - tempDir := t.TempDir() - badKey := "invalid.txt" - path := filepath.Join(tempDir, badKey) - _, err := testMinioClient.FGetObject(ctx, bucketName, badKey, path) - assert.Error(t, err, "The specified key does not exist.") - assert.Check(t, testMinioClient.ObjectIsNotFound(err)) -} - -func TestVisitObjects(t *testing.T) { - keys := []string{} - etags := []string{} - err := testMinioClient.VisitObjects(context.TODO(), bucketName, prefix, func(key, etag string) error { - keys = append(keys, key) - etags = append(etags, etag) - return nil - }) - assert.NilError(t, err) - assert.DeepEqual(t, keys, []string{objectName}) - assert.DeepEqual(t, etags, []string{objectEtag}) -} - -func TestVisitObjectsErr(t *testing.T) { - ctx := context.Background() - badBucketName := "bad-bucket" - err := testMinioClient.VisitObjects(ctx, badBucketName, prefix, func(string, string) error { - return nil - }) - assert.Error(t, err, fmt.Sprintf("listing objects from bucket '%s' failed: The specified bucket does not exist", badBucketName)) -} - -func TestVisitObjectsCallbackErr(t *testing.T) { - mockErr := fmt.Errorf("mock") - err := testMinioClient.VisitObjects(context.TODO(), bucketName, prefix, func(key, etag string) 
error { - return mockErr - }) - assert.Error(t, err, mockErr.Error()) -} - -func TestValidateSecret(t *testing.T) { - t.Parallel() - testCases := []struct { - name string - secret *corev1.Secret - error bool - }{ - { - name: "valid secret", - secret: secret.DeepCopy(), - }, - { - name: "nil secret", - secret: nil, - }, - { - name: "invalid secret", - secret: emptySecret.DeepCopy(), - error: true, - }, - } - for _, testCase := range testCases { - tt := testCase - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - err := ValidateSecret(tt.secret) - if tt.error { - assert.Error(t, err, fmt.Sprintf("invalid '%v' secret data: required fields 'accesskey' and 'secretkey'", tt.secret.Name)) - } else { - assert.NilError(t, err) - } - }) - } -} - -func bucketStub(bucket sourcev1.Bucket, endpoint string) *sourcev1.Bucket { - b := bucket.DeepCopy() - b.Spec.Endpoint = endpoint - b.Spec.Insecure = true - return b -} - -func createBucket(ctx context.Context) { - if err := testMinioClient.Client.MakeBucket(ctx, bucketName, miniov7.MakeBucketOptions{}); err != nil { - exists, errBucketExists := testMinioClient.BucketExists(ctx, bucketName) - if errBucketExists == nil && exists { - deleteBucket(ctx) - } else { - log.Fatalf("could not create bucket: %s", err) - } - } -} - -func deleteBucket(ctx context.Context) { - if err := testMinioClient.Client.RemoveBucket(ctx, bucketName); err != nil { - log.Println(err) - } -} - -func addObjectToBucket(ctx context.Context) { - fileReader := strings.NewReader(getObjectFile()) - fileSize := fileReader.Size() - _, err := testMinioClient.Client.PutObject(ctx, bucketName, objectName, fileReader, fileSize, miniov7.PutObjectOptions{ - ContentType: "text/x-yaml", - }) - if err != nil { - log.Println(err) - } -} - -func removeObjectFromBucket(ctx context.Context) { - if err := testMinioClient.Client.RemoveObject(ctx, bucketName, objectName, miniov7.RemoveObjectOptions{ - GovernanceBypass: true, - }); err != nil { - log.Println(err) - } -} - -func 
getObjectFile() string { - return ` - apiVersion: source.toolkit.fluxcd.io/v1beta2 - kind: Bucket - metadata: - name: podinfo - namespace: default - spec: - interval: 5m - provider: aws - bucketName: podinfo - endpoint: s3.amazonaws.com - region: us-east-1 - timeout: 30s - ` -} diff --git a/tests/fuzz/Dockerfile.builder b/tests/fuzz/Dockerfile.builder index 7fabaee78..0b45115bb 100644 --- a/tests/fuzz/Dockerfile.builder +++ b/tests/fuzz/Dockerfile.builder @@ -1,9 +1,9 @@ FROM gcr.io/oss-fuzz-base/base-builder-go -RUN wget https://go.dev/dl/go1.21.3.linux-amd64.tar.gz \ +RUN wget https://go.dev/dl/go1.24.0.linux-amd64.tar.gz \ && mkdir temp-go \ && rm -rf /root/.go/* \ - && tar -C temp-go/ -xzf go1.21.3.linux-amd64.tar.gz \ + && tar -C temp-go/ -xzf go1.24.0.linux-amd64.tar.gz \ && mv temp-go/go/* /root/.go/ ENV SRC=$GOPATH/src/github.com/fluxcd/source-controller diff --git a/tests/listener/listener.go b/tests/listener/listener.go new file mode 100644 index 000000000..289b2adf0 --- /dev/null +++ b/tests/listener/listener.go @@ -0,0 +1,47 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testlistener + +import ( + "net" + "strconv" + "strings" + "testing" + + . "github.com/onsi/gomega" +) + +// New creates a TCP listener on a random port and returns +// the listener, the address and the port of this listener. +// It also registers a cleanup function to close the listener +// when the test ends. 
+func New(t *testing.T) (net.Listener, string, int) { + t.Helper() + + lis, err := net.Listen("tcp", "localhost:0") + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + t.Cleanup(func() { lis.Close() }) + + addr := lis.Addr().String() + addrParts := strings.Split(addr, ":") + portStr := addrParts[len(addrParts)-1] + port, err := strconv.Atoi(portStr) + g.Expect(err).NotTo(HaveOccurred()) + + return lis, addr, port +} diff --git a/tests/proxy/proxy.go b/tests/proxy/proxy.go new file mode 100644 index 000000000..33fadece4 --- /dev/null +++ b/tests/proxy/proxy.go @@ -0,0 +1,48 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testproxy + +import ( + "net/http" + "testing" + + "github.com/elazarl/goproxy" + + testlistener "github.com/fluxcd/source-controller/tests/listener" +) + +// New creates a new goproxy server on a random port and returns +// the address and the port of this server. It also registers a +// cleanup functions to close the server and the listener when +// the test ends. 
+func New(t *testing.T) (string, int) { + t.Helper() + + lis, addr, port := testlistener.New(t) + + handler := goproxy.NewProxyHttpServer() + handler.Verbose = true + + server := &http.Server{ + Addr: addr, + Handler: handler, + } + go server.Serve(lis) + t.Cleanup(func() { server.Close() }) + + return addr, port +} diff --git a/tests/registry/registry.go b/tests/registry/registry.go new file mode 100644 index 000000000..28b36fd20 --- /dev/null +++ b/tests/registry/registry.go @@ -0,0 +1,124 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testregistry + +import ( + "context" + "fmt" + "io" + "net/url" + "strings" + "testing" + "time" + + "github.com/distribution/distribution/v3/configuration" + "github.com/distribution/distribution/v3/registry" + _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" + "github.com/google/go-containerregistry/pkg/crane" + gcrv1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/mutate" + . "github.com/onsi/gomega" + "github.com/sirupsen/logrus" + + "github.com/fluxcd/pkg/oci" + + testlistener "github.com/fluxcd/source-controller/tests/listener" +) + +func New(t *testing.T) string { + t.Helper() + + // Get a free random port and release it so the registry can use it. 
+ listener, addr, _ := testlistener.New(t) + err := listener.Close() + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + + config := &configuration.Configuration{} + config.HTTP.Addr = addr + config.HTTP.DrainTimeout = time.Duration(10) * time.Second + config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}} + config.Log.AccessLog.Disabled = true + config.Log.Level = "error" + logrus.SetOutput(io.Discard) + + r, err := registry.NewRegistry(context.Background(), config) + g.Expect(err).NotTo(HaveOccurred()) + + go r.ListenAndServe() + + return addr +} + +type PodinfoImage struct { + URL string + Tag string + Digest gcrv1.Hash +} + +func CreatePodinfoImageFromTar(tarFilePath, tag, registryURL string, opts ...crane.Option) (*PodinfoImage, error) { + // Create Image + image, err := crane.Load(tarFilePath) + if err != nil { + return nil, err + } + + image = setPodinfoImageAnnotations(image, tag) + + // url.Parse doesn't handle urls with no scheme well e.g localhost: + if !(strings.HasPrefix(registryURL, "http://") || strings.HasPrefix(registryURL, "https://")) { + registryURL = fmt.Sprintf("http://%s", registryURL) + } + + myURL, err := url.Parse(registryURL) + if err != nil { + return nil, err + } + repositoryURL := fmt.Sprintf("%s/podinfo", myURL.Host) + + // Image digest + podinfoImageDigest, err := image.Digest() + if err != nil { + return nil, err + } + + // Push image + err = crane.Push(image, repositoryURL, opts...) + if err != nil { + return nil, err + } + + // Tag the image + err = crane.Tag(repositoryURL, tag, opts...) 
+ if err != nil { + return nil, err + } + + return &PodinfoImage{ + URL: "oci://" + repositoryURL, + Tag: tag, + Digest: podinfoImageDigest, + }, nil +} + +func setPodinfoImageAnnotations(img gcrv1.Image, tag string) gcrv1.Image { + metadata := map[string]string{ + oci.SourceAnnotation: "https://github.com/stefanprodan/podinfo", + oci.RevisionAnnotation: fmt.Sprintf("%s@sha1:b3b00fe35424a45d373bf4c7214178bc36fd7872", tag), + } + return mutate.Annotations(img, metadata).(gcrv1.Image) +}