diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index e5db81621..f8796c21f 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -5,7 +5,7 @@ updates: directory: "/" labels: ["dependencies"] schedule: - interval: "daily" + interval: "monthly" groups: go-deps: patterns: @@ -37,4 +37,4 @@ updates: patterns: - "*" schedule: - interval: "daily" + interval: "monthly" diff --git a/.github/labels.yaml b/.github/labels.yaml index b4cc5408b..2f3e1d525 100644 --- a/.github/labels.yaml +++ b/.github/labels.yaml @@ -34,3 +34,9 @@ - name: backport:release/v1.5.x description: To be backported to release/v1.5.x color: '#ffd700' +- name: backport:release/v1.6.x + description: To be backported to release/v1.6.x + color: '#ffd700' +- name: backport:release/v1.7.x + description: To be backported to release/v1.7.x + color: '#ffd700' diff --git a/.github/workflows/backport.yaml b/.github/workflows/backport.yaml index 7a0974317..4081bb128 100644 --- a/.github/workflows/backport.yaml +++ b/.github/workflows/backport.yaml @@ -1,34 +1,12 @@ name: backport - on: pull_request_target: types: [closed, labeled] - -permissions: - contents: read - jobs: - pull-request: - runs-on: ubuntu-latest + backport: permissions: - contents: write - pull-requests: write - if: github.event.pull_request.state == 'closed' && github.event.pull_request.merged && (github.event_name != 'labeled' || startsWith('backport:', github.event.label.name)) - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - ref: ${{ github.event.pull_request.head.sha }} - - name: Create backport PRs - uses: korthout/backport-action@be567af183754f6a5d831ae90f648954763f17f5 # v3.1.0 - # xref: https://github.com/korthout/backport-action#inputs - with: - # Use token to allow workflows to be triggered for the created PR - github_token: ${{ secrets.BOT_GITHUB_TOKEN }} - # Match labels with a pattern `backport:` - label_pattern: '^backport:([^ ]+)$' - # A bit shorter pull-request title than the default - pull_title: '[${target_branch}] ${pull_title}' - # Simpler PR description than default - pull_description: |- - Automated backport to `${target_branch}`, triggered by a label in #${pull_number}. + contents: write # for reading and creating branches. + pull-requests: write # for creating pull requests against release branches. + uses: fluxcd/gha-workflows/.github/workflows/backport.yaml@v0.4.0 + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/cifuzz.yaml b/.github/workflows/cifuzz.yaml index 20f2ba1cc..16ddaa227 100644 --- a/.github/workflows/cifuzz.yaml +++ b/.github/workflows/cifuzz.yaml @@ -4,27 +4,16 @@ on: branches: - 'main' - 'release/**' - paths-ignore: - - 'CHANGELOG.md' - - 'README.md' - - 'MAINTAINERS' - -permissions: - contents: read - jobs: smoketest: runs-on: ubuntu-latest + permissions: + contents: read # for reading the repository code. 
steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Setup Go - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 + - name: Test suite setup + uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.4.0 with: - go-version: 1.23.x - cache-dependency-path: | - **/go.sum - **/go.mod + go-version: 1.25.x - name: Smoke test Fuzzers run: make fuzz-smoketest env: diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 2db226cde..483e65ad6 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -1,46 +1,28 @@ name: e2e - on: workflow_dispatch: pull_request: - branches: - - 'main' - - 'release/**' push: branches: - 'main' - 'release/**' - -permissions: - contents: read # for actions/checkout to fetch code - jobs: - kind-linux-amd64: runs-on: ubuntu-latest + permissions: + contents: read # for reading the repository code. steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Setup Go - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 + - name: Test suite setup + uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.4.0 with: - go-version: 1.23.x - cache-dependency-path: | - **/go.sum - **/go.mod + go-version: 1.25.x + - name: Verify + run: make verify - name: Enable integration tests # Only run integration tests for main and release branches if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/') run: | echo 'GO_TAGS=integration' >> $GITHUB_ENV - - name: Setup Kubernetes - uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0 - with: - cluster_name: kind - - name: Setup Kustomize - uses: fluxcd/pkg/actions/kustomize@main - - name: Setup Helm - uses: fluxcd/pkg/actions/helm@main - name: Run E2E tests env: SKIP_COSIGN_VERIFICATION: true diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml deleted file mode 100644 index 3e684fc68..000000000 --- a/.github/workflows/nightly.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: nightly -on: - schedule: - - cron: '0 0 * * *' - workflow_dispatch: - -env: - REPOSITORY: ${{ github.repository }} - -permissions: - contents: read # for actions/checkout to fetch code - -jobs: - build: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Setup QEMU - uses: docker/setup-qemu-action@4574d27a4764455b42196d70a065bc6853246a25 # v3.4.0 - - name: Setup Docker Buildx - id: buildx - uses: docker/setup-buildx-action@f7ce87c1d6bead3e36075b2ce75da1f6cc28aaca # v3.9.0 - with: - buildkitd-flags: "--debug" - - name: Build multi-arch container image - uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 # v6.13.0 - with: - push: false - builder: ${{ steps.buildx.outputs.name }} - context: . - file: ./Dockerfile - platforms: linux/amd64,linux/arm/v7,linux/arm64 - tags: | - ${{ env.REPOSITORY }}:nightly diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 000000000..9cc8d6e17 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,66 @@ +name: release +on: + push: + tags: + - 'v*' + workflow_dispatch: + inputs: + tag: + description: 'image tag prefix' + default: 'rc' + required: true +jobs: + release: + permissions: + contents: write # for creating the GitHub release. + id-token: write # for creating OIDC tokens for signing. 
+ packages: write # for pushing and signing container images. + uses: fluxcd/gha-workflows/.github/workflows/controller-release.yaml@v0.4.0 + with: + controller: ${{ github.event.repository.name }} + release-candidate-prefix: ${{ github.event.inputs.tag }} + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} + dockerhub-token: ${{ secrets.DOCKER_FLUXCD_PASSWORD }} + release-provenance: + needs: [release] + permissions: + actions: read # for detecting the Github Actions environment. + id-token: write # for creating OIDC tokens for signing. + contents: write # for uploading attestations to GitHub releases. + if: startsWith(github.ref, 'refs/tags/v') + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0 + with: + provenance-name: "provenance.intoto.jsonl" + base64-subjects: "${{ needs.release.outputs.release-digests }}" + upload-assets: true + dockerhub-provenance: + needs: [release] + permissions: + contents: read # for reading the repository code. + actions: read # for detecting the Github Actions environment. + id-token: write # for creating OIDC tokens for signing. + packages: write # for uploading attestations. + if: startsWith(github.ref, 'refs/tags/v') + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0 + with: + image: ${{ needs.release.outputs.image-name }} + digest: ${{ needs.release.outputs.image-digest }} + registry-username: ${{ github.repository_owner == 'fluxcd' && 'fluxcdbot' || github.repository_owner }} + secrets: + registry-password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }} + ghcr-provenance: + needs: [release] + permissions: + contents: read # for reading the repository code. + actions: read # for detecting the Github Actions environment. + id-token: write # for creating OIDC tokens for signing. + packages: write # for uploading attestations. + if: startsWith(github.ref, 'refs/tags/v') + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0 + with: + image: ghcr.io/${{ needs.release.outputs.image-name }} + digest: ${{ needs.release.outputs.image-digest }} + registry-username: fluxcdbot # not necessary for ghcr.io + secrets: + registry-password: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index 1868bc4dd..000000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,160 +0,0 @@ -name: release -on: - push: - tags: - - 'v*' - workflow_dispatch: - inputs: - tag: - description: 'image tag prefix' - default: 'preview' - required: true - -permissions: - contents: read - -env: - CONTROLLER: ${{ github.event.repository.name }} - -jobs: - release: - outputs: - hashes: ${{ steps.slsa.outputs.hashes }} - image_url: ${{ steps.slsa.outputs.image_url }} - image_digest: ${{ steps.slsa.outputs.image_digest }} - runs-on: ubuntu-latest - permissions: - contents: write # for creating the GitHub release. - id-token: write # for creating OIDC tokens for signing. - packages: write # for pushing and signing container images. 
- steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Setup Kustomize - uses: fluxcd/pkg/actions/kustomize@main - - name: Prepare - id: prep - run: | - VERSION="${{ github.event.inputs.tag }}-${GITHUB_SHA::8}" - if [[ $GITHUB_REF == refs/tags/* ]]; then - VERSION=${GITHUB_REF/refs\/tags\//} - fi - echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT - echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT - - name: Setup QEMU - uses: docker/setup-qemu-action@4574d27a4764455b42196d70a065bc6853246a25 # v3.4.0 - - name: Setup Docker Buildx - id: buildx - uses: docker/setup-buildx-action@f7ce87c1d6bead3e36075b2ce75da1f6cc28aaca # v3.9.0 - - name: Login to GitHub Container Registry - uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 - with: - registry: ghcr.io - username: fluxcdbot - password: ${{ secrets.GHCR_TOKEN }} - - name: Login to Docker Hub - uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 - with: - username: fluxcdbot - password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }} - - name: Generate images meta - id: meta - uses: docker/metadata-action@369eb591f429131d6889c46b94e711f089e6ca96 # v5.6.1 - with: - images: | - fluxcd/${{ env.CONTROLLER }} - ghcr.io/fluxcd/${{ env.CONTROLLER }} - tags: | - type=raw,value=${{ steps.prep.outputs.VERSION }} - - name: Publish images - id: build-push - uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 # v6.13.0 - with: - sbom: true - provenance: true - push: true - builder: ${{ steps.buildx.outputs.name }} - context: . - file: ./Dockerfile - platforms: linux/amd64,linux/arm/v7,linux/arm64 - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - - uses: sigstore/cosign-installer@c56c2d3e59e4281cc41dea2217323ba5694b171e # v3.8.0 - - name: Sign images - env: - COSIGN_EXPERIMENTAL: 1 - run: | - cosign sign --yes fluxcd/${{ env.CONTROLLER }}@${{ steps.build-push.outputs.digest }} - cosign sign --yes ghcr.io/fluxcd/${{ env.CONTROLLER }}@${{ steps.build-push.outputs.digest }} - - name: Generate release artifacts - if: startsWith(github.ref, 'refs/tags/v') - run: | - mkdir -p config/release - kustomize build ./config/crd > ./config/release/${{ env.CONTROLLER }}.crds.yaml - kustomize build ./config/manager > ./config/release/${{ env.CONTROLLER }}.deployment.yaml - - uses: anchore/sbom-action/download-syft@f325610c9f50a54015d37c8d16cb3b0e2c8f4de0 # v0.18.0 - - name: Create release and SBOM - id: run-goreleaser - if: startsWith(github.ref, 'refs/tags/v') - uses: goreleaser/goreleaser-action@9ed2f89a662bf1735a48bc8557fd212fa902bebf # v6.1.0 - with: - version: latest - args: release --clean --skip=validate - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Generate SLSA metadata - id: slsa - env: - ARTIFACTS: "${{ steps.run-goreleaser.outputs.artifacts }}" - run: | - hashes=$(echo -E $ARTIFACTS | jq --raw-output '.[] | {name, "digest": (.extra.Digest // .extra.Checksum)} | select(.digest) | {digest} + {name} | join(" ") | sub("^sha256:";"")' | base64 -w0) - echo "hashes=$hashes" >> $GITHUB_OUTPUT - - image_url=fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.version }} - echo "image_url=$image_url" >> $GITHUB_OUTPUT - - image_digest=${{ steps.build-push.outputs.digest }} - echo "image_digest=$image_digest" >> $GITHUB_OUTPUT - - release-provenance: - needs: [release] - permissions: - actions: read # for detecting the Github Actions environment. 
- id-token: write # for creating OIDC tokens for signing. - contents: write # for uploading attestations to GitHub releases. - if: startsWith(github.ref, 'refs/tags/v') - uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0 - with: - provenance-name: "provenance.intoto.jsonl" - base64-subjects: "${{ needs.release.outputs.hashes }}" - upload-assets: true - - dockerhub-provenance: - needs: [release] - permissions: - actions: read # for detecting the Github Actions environment. - id-token: write # for creating OIDC tokens for signing. - packages: write # for uploading attestations. - if: startsWith(github.ref, 'refs/tags/v') - uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.0.0 - with: - image: ${{ needs.release.outputs.image_url }} - digest: ${{ needs.release.outputs.image_digest }} - registry-username: fluxcdbot - secrets: - registry-password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }} - - ghcr-provenance: - needs: [release] - permissions: - actions: read # for detecting the Github Actions environment. - id-token: write # for creating OIDC tokens for signing. - packages: write # for uploading attestations. - if: startsWith(github.ref, 'refs/tags/v') - uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.0.0 - with: - image: ghcr.io/${{ needs.release.outputs.image_url }} - digest: ${{ needs.release.outputs.image_digest }} - registry-username: fluxcdbot - secrets: - registry-password: ${{ secrets.GHCR_TOKEN }} diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml index 91298bc23..ea8e992de 100644 --- a/.github/workflows/scan.yaml +++ b/.github/workflows/scan.yaml @@ -1,52 +1,17 @@ name: scan - on: push: - branches: [ 'main', 'release/**' ] + branches: [ main ] pull_request: - branches: [ 'main', 'release/**' ] + branches: [ main ] schedule: - cron: '18 10 * * 3' - -permissions: - contents: read # for actions/checkout to fetch code - security-events: write # for codeQL to write security events - jobs: - fossa: - name: FOSSA - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Run FOSSA scan and upload build data - uses: fossa-contrib/fossa-action@cdc5065bcdee31a32e47d4585df72d66e8e941c2 # v3.0.0 - with: - # FOSSA Push-Only API Token - fossa-api-key: 5ee8bf422db1471e0bcf2bcb289185de - github-token: ${{ github.token }} - - codeql: - name: CodeQL - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Setup Go - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 - with: - go-version: 1.23.x - cache-dependency-path: | - **/go.sum - **/go.mod - - name: Initialize CodeQL - uses: github/codeql-action/init@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8 - with: - languages: go - # xref: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs - # xref: https://codeql.github.com/codeql-query-help/go/ - queries: security-and-quality - - name: Autobuild - uses: github/codeql-action/autobuild@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8 - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8 + analyze: + permissions: + contents: read # for reading the repository code. 
+ security-events: write # for uploading the CodeQL analysis results. + uses: fluxcd/gha-workflows/.github/workflows/code-scan.yaml@v0.4.0 + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} + fossa-token: ${{ secrets.FOSSA_TOKEN }} diff --git a/.github/workflows/sync-labels.yaml b/.github/workflows/sync-labels.yaml index d0c2c8816..a4635094d 100644 --- a/.github/workflows/sync-labels.yaml +++ b/.github/workflows/sync-labels.yaml @@ -6,23 +6,11 @@ on: - main paths: - .github/labels.yaml - -permissions: - contents: read - jobs: - labels: - name: Run sync - runs-on: ubuntu-latest + sync-labels: permissions: - issues: write - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: EndBug/label-sync@52074158190acb45f3077f9099fea818aa43f97a # v2.3.3 - with: - # Configuration file - config-file: | - https://raw.githubusercontent.com/fluxcd/community/main/.github/standard-labels.yaml - .github/labels.yaml - # Strictly declarative - delete-other-labels: true + contents: read # for reading the labels file. + issues: write # for creating and updating labels. + uses: fluxcd/gha-workflows/.github/workflows/labels-sync.yaml@v0.4.0 + secrets: + github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 000000000..c7a9aa2e8 --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,22 @@ +name: test +on: + workflow_dispatch: + pull_request: + push: + branches: + - 'main' + - 'release/**' +jobs: + test-linux-amd64: + runs-on: ubuntu-latest + steps: + - name: Test suite setup + uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.4.0 + with: + go-version: 1.25.x + - name: Run tests + env: + SKIP_COSIGN_VERIFICATION: true + TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }} + TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }} + run: make test diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml deleted file mode 100644 index 81c867d4a..000000000 --- a/.github/workflows/tests.yaml +++ /dev/null @@ -1,57 +0,0 @@ -name: tests - -on: - workflow_dispatch: - pull_request: - branches: - - 'main' - - 'release/**' - push: - branches: - - 'main' - - 'release/**' - -permissions: - contents: read # for actions/checkout to fetch code - -jobs: - - test-linux-amd64: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Setup Go - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 - with: - go-version: 1.23.x - cache-dependency-path: | - **/go.sum - **/go.mod - - name: Run tests - env: - SKIP_COSIGN_VERIFICATION: true - TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }} - TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }} - run: make test - - test-linux-arm64: - runs-on: - group: "ARM64" - if: github.actor != 'dependabot[bot]' - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Setup Go - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 - with: - go-version: 1.23.x - cache-dependency-path: | - **/go.sum - **/go.mod - - name: Run tests - env: - SKIP_COSIGN_VERIFICATION: true - TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }} - TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }} - run: make test diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml deleted file mode 100644 index 
02d61143a..000000000
--- a/.github/workflows/verify.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-name: verify
-
-on:
-  pull_request:
-    branches:
-      - 'main'
-      - 'release/**'
-  push:
-    branches:
-      - 'main'
-      - 'release/**'
-
-permissions:
-  contents: read # for actions/checkout to fetch code
-
-jobs:
-
-  verify-linux-amd64:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - name: Setup Go
-        uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
-        with:
-          go-version: 1.23.x
-          cache-dependency-path: |
-            **/go.sum
-            **/go.mod
-      - name: Verify
-        run: make verify
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9ba1492bf..74cb010a9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,182 @@
 All notable changes to this project are documented in this file.
+## 1.7.0
+
+**Release date:** 2025-09-15
+
+This minor release comes with new features, improvements and bug fixes.
+
+### ExternalArtifact
+
+A new [ExternalArtifact](https://github.com/fluxcd/source-controller/blob/main/docs/spec/v1/externalartifacts.md) API has been added to the `source.toolkit.fluxcd.io` group. This API enables advanced source composition and decomposition patterns implemented by the [source-watcher](https://github.com/fluxcd/source-watcher) controller.
+
+### GitRepository
+
+GitRepository controller now includes fixes for stalling issues and improved error handling. Multi-tenant workload identity support has been added for Azure repositories when the `ObjectLevelWorkloadIdentity` feature gate is enabled. TLS configuration support has been added for GitHub App authentication.
+
+### Bucket
+
+Bucket controller now supports multi-tenant workload identity for AWS, Azure and GCP providers when the `ObjectLevelWorkloadIdentity` feature gate is enabled. A default service account flag has been added for lockdown scenarios.
+
+### General updates
+
+The controller now supports system certificate pools for improved CA compatibility, and TLS ServerName pinning has been removed from TLS configuration for better flexibility. A `--default-service-account=` flag was introduced for workload identity multi-tenancy lockdown.
+
+In addition, the Kubernetes dependencies have been updated to v1.34, Helm
+has been updated to v3.19 and various other controller dependencies have
+been updated to their latest version. The controller is now built with
+Go 1.25.
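For illustration of the Bucket workload identity support described in these notes, here is a minimal sketch of a Bucket that authenticates through a Kubernetes ServiceAccount instead of a Secret, assuming the `ObjectLevelWorkloadIdentity` feature gate is enabled on the controller. The object names, bucket, region and ServiceAccount are placeholders, not values taken from this release.

```yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: Bucket
metadata:
  name: app-artifacts        # placeholder name
  namespace: apps            # placeholder namespace
spec:
  interval: 5m
  provider: aws              # workload identity applies to the aws, azure and gcp providers
  bucketName: example-app-artifacts   # placeholder bucket
  endpoint: s3.amazonaws.com
  region: us-east-1
  # Credentials are obtained via this ServiceAccount (assumed to be bound to a
  # cloud identity, e.g. an IAM role) instead of .spec.secretRef.
  serviceAccountName: bucket-reader   # placeholder ServiceAccount
```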
+
+Fixes:
+- Fix GitRepository controller stalling when it shouldn't
+  [#1865](https://github.com/fluxcd/source-controller/pull/1865)
+
+Improvements:
+- [RFC-0010] Add multi-tenant workload identity support for GCP Bucket
+  [#1862](https://github.com/fluxcd/source-controller/pull/1862)
+- [RFC-0010] Add multi-tenant workload identity support for AWS Bucket
+  [#1868](https://github.com/fluxcd/source-controller/pull/1868)
+- [RFC-0010] Add multi-tenant workload identity support for Azure GitRepository
+  [#1871](https://github.com/fluxcd/source-controller/pull/1871)
+- [RFC-0010] Add default-service-account for lockdown
+  [#1872](https://github.com/fluxcd/source-controller/pull/1872)
+- [RFC-0010] Add multi-tenant workload identity support for Azure Blob Storage
+  [#1875](https://github.com/fluxcd/source-controller/pull/1875)
+- [RFC-0012] Add ExternalArtifact API documentation
+  [#1881](https://github.com/fluxcd/source-controller/pull/1881)
+- [RFC-0012] Refactor controller to use `fluxcd/pkg/artifact`
+  [#1883](https://github.com/fluxcd/source-controller/pull/1883)
+- Migrate OCIRepository controller to runtime/secrets
+  [#1851](https://github.com/fluxcd/source-controller/pull/1851)
+- Migrate Bucket controller to runtime/secrets
+  [#1852](https://github.com/fluxcd/source-controller/pull/1852)
+- Add TLS config for GitHub App authentication
+  [#1860](https://github.com/fluxcd/source-controller/pull/1860)
+- Remove ServerName pinning from TLS config
+  [#1870](https://github.com/fluxcd/source-controller/pull/1870)
+- Extract storage operations to a dedicated package
+  [#1864](https://github.com/fluxcd/source-controller/pull/1864)
+- Remove deprecated APIs in group `source.toolkit.fluxcd.io/v1beta1`
+  [#1861](https://github.com/fluxcd/source-controller/pull/1861)
+- Migrate tests from gotest to gomega
+  [#1876](https://github.com/fluxcd/source-controller/pull/1876)
+- Update dependencies
+  [#1888](https://github.com/fluxcd/source-controller/pull/1888)
+  [#1880](https://github.com/fluxcd/source-controller/pull/1880)
+  [#1878](https://github.com/fluxcd/source-controller/pull/1878)
+  [#1876](https://github.com/fluxcd/source-controller/pull/1876)
+  [#1874](https://github.com/fluxcd/source-controller/pull/1874)
+  [#1850](https://github.com/fluxcd/source-controller/pull/1850)
+  [#1844](https://github.com/fluxcd/source-controller/pull/1844)
+
+## 1.6.2
+
+**Release date:** 2025-06-27
+
+This patch release comes with a fix for `rsa-sha2-512` and `rsa-sha2-256` algorithms
+not being prioritized for `ssh-rsa` host keys.
+
+Fixes:
+- Fix: Prioritize sha2-512 and sha2-256 for ssh-rsa host keys
+  [#1839](https://github.com/fluxcd/source-controller/pull/1839)
+
+## 1.6.1
+
+**Release date:** 2025-06-13
+
+This patch release comes with a fix for the `knownhosts: key mismatch`
+error in the `GitRepository` API when using SSH authentication, and
+a fix for authentication with
+[public ECR repositories](https://fluxcd.io/flux/integrations/aws/#for-amazon-public-elastic-container-registry)
+in the `OCIRepository` API.
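The SSH host key handling addressed by this fix and by 1.6.2 applies to GitRepository objects configured roughly like the sketch below; the URL and Secret name are placeholders, and the referenced Secret is assumed to carry the usual `identity`, `identity.pub` and `known_hosts` keys.

```yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: podinfo              # placeholder name
  namespace: flux-system
spec:
  interval: 1m
  url: ssh://git@example.com/org/podinfo.git   # placeholder repository
  ref:
    branch: main
  # Host key verification uses the known_hosts entry from this Secret;
  # the 1.6.1 and 1.6.2 fixes concern how those host keys are matched.
  secretRef:
    name: ssh-credentials    # placeholder Secret
```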
+
+Fix:
+- Fix authentication for public ECR
+  [#1825](https://github.com/fluxcd/source-controller/pull/1825)
+- Fix `knownhosts key mismatch` regression bug
+  [#1829](https://github.com/fluxcd/source-controller/pull/1829)
+
+## 1.6.0
+
+**Release date:** 2025-05-27
+
+This minor release promotes the OCIRepository API to GA, and comes with new features,
+improvements and bug fixes.
+
+### OCIRepository
+
+The `OCIRepository` API has been promoted from `v1beta2` to `v1` (GA).
+The `v1` API is backwards compatible with `v1beta2`.
+
+OCIRepository API now supports object-level workload identity by setting
+`.spec.provider` to one of `aws`, `azure`, or `gcp`, and setting
+`.spec.serviceAccountName` to the name of a service account in the same
+namespace that has been configured with appropriate cloud permissions.
+For this feature to work, the controller feature gate
+`ObjectLevelWorkloadIdentity` must be enabled. See a complete guide
+[here](https://fluxcd.io/flux/integrations/).
+
+OCIRepository API now caches registry credentials for cloud providers
+by default. This behavior can be disabled or fine-tuned by adjusting the
+token cache controller flags (see [docs](https://fluxcd.io/flux/components/source/options/)).
+The token cache also exposes metrics that are documented
+[here](https://fluxcd.io/flux/monitoring/metrics/#controller-metrics).
+
+### GitRepository
+
+GitRepository API now supports sparse checkout by setting a list
+of directories in the `.spec.sparseCheckout` field. This allows
+for optimizing the amount of data fetched from the Git repository.
+
+GitRepository API now supports mTLS authentication for HTTPS Git repositories
+by setting the fields `tls.crt`, `tls.key`, and `ca.crt` in the `.data` field
+of the referenced Secret in `.spec.secretRef`.
+
+GitRepository API now caches credentials for non-`generic` providers by default.
+This behavior can be disabled or fine-tuned by adjusting the
+token cache controller flags (see [docs](https://fluxcd.io/flux/components/source/options/)).
+The token cache also exposes metrics that are documented
+[here](https://fluxcd.io/flux/monitoring/metrics/#controller-metrics).
+
+### General updates
+
+In addition, the Kubernetes dependencies have been updated to v1.33 and
+various other controller dependencies have been updated to their latest
+version. The controller is now built with Go 1.24.
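As a sketch of the sparse checkout support described under GitRepository above, the manifest below fetches only two directories from the repository; the URL and directory names are placeholders chosen for the example.

```yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: app-config           # placeholder name
  namespace: apps
spec:
  interval: 5m
  url: https://example.com/org/app-config.git   # placeholder URL
  ref:
    branch: main
  # Only the listed directories are checked out and packaged into the Artifact;
  # the applied list is reported in .status.observedSparseCheckout.
  sparseCheckout:
    - deploy
    - charts/app
```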
+ +Fixes: +- Downgrade `Masterminds/semver` to v3.3.0 + [#1785](https://github.com/fluxcd/source-controller/pull/1785) + +Improvements: +- Promote OCIRepository API to v1 (GA) + [#1794](https://github.com/fluxcd/source-controller/pull/1794) +- [RFC-0010] Introduce object-level workload identity for container registry APIs and cache credentials + [#1790](https://github.com/fluxcd/source-controller/pull/1790) + [#1802](https://github.com/fluxcd/source-controller/pull/1802) + [#1811](https://github.com/fluxcd/source-controller/pull/1811) +- Implement Sparse Checkout for `GitRepository` + [#1774](https://github.com/fluxcd/source-controller/pull/1774) +- Add Mutual TLS support to `GitRepository` + [#1778](https://github.com/fluxcd/source-controller/pull/1778) +- Introduce token cache for `GitRepository` + [#1745](https://github.com/fluxcd/source-controller/pull/1745) + [#1788](https://github.com/fluxcd/source-controller/pull/1788) + [#1789](https://github.com/fluxcd/source-controller/pull/1789) +- Build controller without CGO + [#1725](https://github.com/fluxcd/source-controller/pull/1725) +- Various dependency updates + [#1812](https://github.com/fluxcd/source-controller/pull/1812) + [#1800](https://github.com/fluxcd/source-controller/pull/1800) + [#1810](https://github.com/fluxcd/source-controller/pull/1810) + [#1806](https://github.com/fluxcd/source-controller/pull/1806) + [#1782](https://github.com/fluxcd/source-controller/pull/1782) + [#1783](https://github.com/fluxcd/source-controller/pull/1783) + [#1775](https://github.com/fluxcd/source-controller/pull/1775) + [#1728](https://github.com/fluxcd/source-controller/pull/1728) + [#1722](https://github.com/fluxcd/source-controller/pull/1722) + ## 1.5.0 **Release date:** 2025-02-13 diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 6a6be3c1c..11d05ad83 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -15,7 +15,7 @@ There are a number of dependencies required to be able to run the controller and In addition to the above, the following dependencies are also used by some of the `make` targets: -- `controller-gen` (v0.12.0) +- `controller-gen` (v0.19.0) - `gen-crd-api-reference-docs` (v0.3.0) - `setup-envtest` (latest) @@ -24,7 +24,7 @@ If any of the above dependencies are not present on your system, the first invoc ## How to run the test suite Prerequisites: -* Go >= 1.23 +* Go >= 1.25 You can run the test suite by simply doing diff --git a/Dockerfile b/Dockerfile index a16a8fb50..0f7c6f849 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.23 +ARG GO_VERSION=1.25 ARG XX_VERSION=1.6.1 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx @@ -26,7 +26,6 @@ RUN go mod download # Copy source code COPY main.go main.go -COPY pkg/ pkg/ COPY internal/ internal/ ARG TARGETPLATFORM @@ -36,7 +35,7 @@ ARG TARGETARCH ENV CGO_ENABLED=0 RUN xx-go build -trimpath -a -o source-controller main.go -FROM alpine:3.21 +FROM alpine:3.22 ARG TARGETPLATFORM RUN apk --no-cache add ca-certificates \ diff --git a/Makefile b/Makefile index 49c45d621..28226af5d 100644 --- a/Makefile +++ b/Makefile @@ -38,7 +38,7 @@ FUZZ_TIME ?= 1m GO_STATIC_FLAGS=-ldflags "-s -w" -tags 'netgo,osusergo,static_build$(addprefix ,,$(GO_TAGS))' # API (doc) generation utilities 
-CONTROLLER_GEN_VERSION ?= v0.16.1 +CONTROLLER_GEN_VERSION ?= v0.19.0 GEN_API_REF_DOCS_VERSION ?= e327d0730470cbd61b06300f81c5fcf91c23c113 # If gobin not set, create one on ./build and add to path. @@ -115,12 +115,11 @@ manifests: controller-gen ## Generate manifests, e.g. CRD, RBAC, etc. cd api; $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role paths="./..." output:crd:artifacts:config="../config/crd/bases" api-docs: gen-crd-api-reference-docs ## Generate API reference documentation - $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1beta2 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/v1beta2/source.md $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/v1/source.md tidy: ## Run go mod tidy - cd api; rm -f go.sum; go mod tidy -compat=1.23 - rm -f go.sum; go mod tidy -compat=1.23 + cd api; rm -f go.sum; go mod tidy -compat=1.25 + rm -f go.sum; go mod tidy -compat=1.25 fmt: ## Run go fmt against code go fmt ./... diff --git a/PROJECT b/PROJECT index 0c243993c..9d89d81be 100644 --- a/PROJECT +++ b/PROJECT @@ -40,4 +40,10 @@ resources: - group: source kind: Bucket version: v1 +- group: source + kind: OCIRepository + version: v1 +- group: source + kind: ExternalArtifact + version: v1 version: "2" diff --git a/README.md b/README.md index 1838328d2..6f07b2e00 100644 --- a/README.md +++ b/README.md @@ -16,13 +16,13 @@ and is a core component of the [GitOps toolkit](https://fluxcd.io/flux/component ## APIs -| Kind | API Version | -|-------------------------------------------------------|------------------------------------| -| [GitRepository](docs/spec/v1/gitrepositories.md) | `source.toolkit.fluxcd.io/v1` | -| [OCIRepository](docs/spec/v1beta2/ocirepositories.md) | `source.toolkit.fluxcd.io/v1beta2` | -| [HelmRepository](docs/spec/v1/helmrepositories.md) | `source.toolkit.fluxcd.io/v1` | -| [HelmChart](docs/spec/v1/helmcharts.md) | `source.toolkit.fluxcd.io/v1` | -| [Bucket](docs/spec/v1/buckets.md) | `source.toolkit.fluxcd.io/v1` | +| Kind | API Version | +|----------------------------------------------------|-------------------------------| +| [GitRepository](docs/spec/v1/gitrepositories.md) | `source.toolkit.fluxcd.io/v1` | +| [OCIRepository](docs/spec/v1/ocirepositories.md) | `source.toolkit.fluxcd.io/v1` | +| [HelmRepository](docs/spec/v1/helmrepositories.md) | `source.toolkit.fluxcd.io/v1` | +| [HelmChart](docs/spec/v1/helmcharts.md) | `source.toolkit.fluxcd.io/v1` | +| [Bucket](docs/spec/v1/buckets.md) | `source.toolkit.fluxcd.io/v1` | ## Features diff --git a/api/go.mod b/api/go.mod index b0800871e..3d821f349 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,35 +1,34 @@ module github.com/fluxcd/source-controller/api -go 1.23.0 +go 1.25.0 require ( - github.com/fluxcd/pkg/apis/acl v0.6.0 - github.com/fluxcd/pkg/apis/meta v1.10.0 - k8s.io/apimachinery v0.32.1 - sigs.k8s.io/controller-runtime v0.20.1 + github.com/fluxcd/pkg/apis/acl v0.9.0 + github.com/fluxcd/pkg/apis/meta v1.21.0 + k8s.io/apimachinery v0.34.0 + sigs.k8s.io/controller-runtime v0.22.1 ) // Fix CVE-2022-28948 replace gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1 require ( - github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - 
github.com/google/gofuzz v1.2.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/x448/float16 v0.8.4 // indirect - golang.org/x/net v0.34.0 // indirect - golang.org/x/text v0.21.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/text v0.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect - sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) diff --git a/api/go.sum b/api/go.sum index 10f3e77d7..1aa815d66 100644 --- a/api/go.sum +++ b/api/go.sum @@ -1,26 +1,22 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fluxcd/pkg/apis/acl v0.6.0 h1:rllf5uQLzTow81ZCslkQ6LPpDNqVQr6/fWaNksdUEtc= -github.com/fluxcd/pkg/apis/acl v0.6.0/go.mod h1:IVDZx3MAoDWjlLrJHMF9Z27huFuXAEQlnbWw0M6EcTs= -github.com/fluxcd/pkg/apis/meta v1.10.0 h1:rqbAuyl5ug7A5jjRf/rNwBXmNl6tJ9wG2iIsriwnQUk= -github.com/fluxcd/pkg/apis/meta v1.10.0/go.mod h1:n7NstXHDaleAUMajcXTVkhz0MYkvEXy1C/eLI/t1xoI= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fluxcd/pkg/apis/acl v0.9.0 h1:wBpgsKT+jcyZEcM//OmZr9RiF8klL3ebrDp2u2ThsnA= +github.com/fluxcd/pkg/apis/acl v0.9.0/go.mod h1:TttNS+gocsGLwnvmgVi3/Yscwqrjc17+vhgYfqkfrV4= +github.com/fluxcd/pkg/apis/meta v1.21.0 h1:R+bN02chcs0HUmyVDQhqe/FHmYLjipVDMLnyYfNX850= +github.com/fluxcd/pkg/apis/meta v1.21.0/go.mod h1:XUAEUgT4gkWDAEN79E141tmL+v4SV50tVZ/Ojpc/ueg= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -37,29 +33,31 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -69,26 +67,26 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod 
h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -100,19 +98,21 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= -k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= -k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= -k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= +k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= +k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= +k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= k8s.io/klog/v2 v2.130.1 
h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.20.1 h1:JbGMAG/X94NeM3xvjenVUaBjy6Ui4Ogd/J5ZtjZnHaE= -sigs.k8s.io/controller-runtime v0.20.1/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg= +sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/api/v1/artifact_types.go b/api/v1/artifact_types.go deleted file mode 100644 index 9342ecfa6..000000000 --- a/api/v1/artifact_types.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2023 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "path" - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Artifact represents the output of a Source reconciliation. -type Artifact struct { - // Path is the relative file path of the Artifact. It can be used to locate - // the file in the root of the Artifact storage on the local file system of - // the controller managing the Source. - // +required - Path string `json:"path"` - - // URL is the HTTP address of the Artifact as exposed by the controller - // managing the Source. It can be used to retrieve the Artifact for - // consumption, e.g. 
by another controller applying the Artifact contents. - // +required - URL string `json:"url"` - - // Revision is a human-readable identifier traceable in the origin source - // system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. - // +required - Revision string `json:"revision"` - - // Digest is the digest of the file in the form of ':'. - // +optional - // +kubebuilder:validation:Pattern="^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$" - Digest string `json:"digest,omitempty"` - - // LastUpdateTime is the timestamp corresponding to the last update of the - // Artifact. - // +required - LastUpdateTime metav1.Time `json:"lastUpdateTime"` - - // Size is the number of bytes in the file. - // +optional - Size *int64 `json:"size,omitempty"` - - // Metadata holds upstream information such as OCI annotations. - // +optional - Metadata map[string]string `json:"metadata,omitempty"` -} - -// HasRevision returns if the given revision matches the current Revision of -// the Artifact. -func (in *Artifact) HasRevision(revision string) bool { - if in == nil { - return false - } - return in.Revision == revision -} - -// HasDigest returns if the given digest matches the current Digest of the -// Artifact. -func (in *Artifact) HasDigest(digest string) bool { - if in == nil { - return false - } - return in.Digest == digest -} - -// ArtifactDir returns the artifact dir path in the form of -// '//'. -func ArtifactDir(kind, namespace, name string) string { - kind = strings.ToLower(kind) - return path.Join(kind, namespace, name) -} - -// ArtifactPath returns the artifact path in the form of -// '//name>/'. -func ArtifactPath(kind, namespace, name, filename string) string { - return path.Join(ArtifactDir(kind, namespace, name), filename) -} diff --git a/api/v1/bucket_types.go b/api/v1/bucket_types.go index 2c733a6cc..bbedcefb3 100644 --- a/api/v1/bucket_types.go +++ b/api/v1/bucket_types.go @@ -33,7 +33,8 @@ const ( // BucketProviderGeneric for any S3 API compatible storage Bucket. BucketProviderGeneric string = "generic" // BucketProviderAmazon for an AWS S3 object storage Bucket. - // Provides support for retrieving credentials from the AWS EC2 service. + // Provides support for retrieving credentials from the AWS EC2 service + // and workload identity authentication. BucketProviderAmazon string = "aws" // BucketProviderGoogle for a Google Cloud Storage Bucket. // Provides support for authentication using a workload identity. @@ -51,6 +52,8 @@ const ( // +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider" // +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider" // +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider" +// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.serviceAccountName)", message="ServiceAccountName is not supported for the 'generic' Bucket provider" +// +kubebuilder:validation:XValidation:rule="!has(self.secretRef) || !has(self.serviceAccountName)", message="cannot set both .spec.secretRef and .spec.serviceAccountName" type BucketSpec struct { // Provider of the object storage bucket. 
// Defaults to 'generic', which expects an S3 (API) compatible object @@ -93,6 +96,13 @@ type BucketSpec struct { // +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + // ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + // the bucket. This field is only supported for the 'gcp' and 'aws' providers. + // For more information about workload identity: + // https://fluxcd.io/flux/components/source/buckets/#workload-identity + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + // CertSecretRef can be given the name of a Secret containing // either or both of // @@ -199,7 +209,7 @@ type BucketStatus struct { // Artifact represents the last successful Bucket reconciliation. // +optional - Artifact *Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` // ObservedIgnore is the observed exclusion patterns used for constructing // the source artifact. @@ -235,7 +245,7 @@ func (in *Bucket) GetRequeueAfter() time.Duration { } // GetArtifact returns the latest artifact from the source if present in the status sub-resource. -func (in *Bucket) GetArtifact() *Artifact { +func (in *Bucket) GetArtifact() *meta.Artifact { return in.Status.Artifact } diff --git a/api/v1/externalartifact_types.go b/api/v1/externalartifact_types.go new file mode 100644 index 000000000..e338b733b --- /dev/null +++ b/api/v1/externalartifact_types.go @@ -0,0 +1,97 @@ +/* +Copyright 2025 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +// ExternalArtifactKind is the string representation of the ExternalArtifact. +const ExternalArtifactKind = "ExternalArtifact" + +// ExternalArtifactSpec defines the desired state of ExternalArtifact +type ExternalArtifactSpec struct { + // SourceRef points to the Kubernetes custom resource for + // which the artifact is generated. + // +optional + SourceRef *meta.NamespacedObjectKindReference `json:"sourceRef,omitempty"` +} + +// ExternalArtifactStatus defines the observed state of ExternalArtifact +type ExternalArtifactStatus struct { + // Artifact represents the output of an ExternalArtifact reconciliation. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // Conditions holds the conditions for the ExternalArtifact. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// GetConditions returns the status conditions of the object. +func (in *ExternalArtifact) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *ExternalArtifact) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetArtifact returns the latest Artifact from the ExternalArtifact if +// present in the status sub-resource. 
+func (in *ExternalArtifact) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetRequeueAfter returns the duration after which the ExternalArtifact +// must be reconciled again. +func (in *ExternalArtifact) GetRequeueAfter() time.Duration { + return time.Minute +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Source",type="string",JSONPath=".spec.sourceRef.name",description="" + +// ExternalArtifact is the Schema for the external artifacts API +type ExternalArtifact struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ExternalArtifactSpec `json:"spec,omitempty"` + Status ExternalArtifactStatus `json:"status,omitempty"` +} + +// ExternalArtifactList contains a list of ExternalArtifact +// +kubebuilder:object:root=true +type ExternalArtifactList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExternalArtifact `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ExternalArtifact{}, &ExternalArtifactList{}) +} diff --git a/api/v1/gitrepository_types.go b/api/v1/gitrepository_types.go index 20ef37d0c..f104fd0f1 100644 --- a/api/v1/gitrepository_types.go +++ b/api/v1/gitrepository_types.go @@ -77,6 +77,7 @@ const ( // GitRepositorySpec specifies the required configuration to produce an // Artifact for a Git repository. +// +kubebuilder:validation:XValidation:rule="!has(self.serviceAccountName) || (has(self.provider) && self.provider == 'azure')",message="serviceAccountName can only be set when provider is 'azure'" type GitRepositorySpec struct { // URL specifies the Git repository URL, it can be an HTTP/S or SSH address. // +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$" @@ -98,6 +99,11 @@ type GitRepositorySpec struct { // +optional Provider string `json:"provider,omitempty"` + // ServiceAccountName is the name of the Kubernetes ServiceAccount used to + // authenticate to the GitRepository. This field is only supported for 'azure' provider. + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + // Interval at which the GitRepository URL is checked for updates. // This interval is approximate and may be subject to jitter to ensure // efficient use of resources. @@ -148,6 +154,12 @@ type GitRepositorySpec struct { // should be included in the Artifact produced for this GitRepository. // +optional Include []GitRepositoryInclude `json:"include,omitempty"` + + // SparseCheckout specifies a list of directories to checkout when cloning + // the repository. If specified, only these directories are included in the + // Artifact produced for this GitRepository. + // +optional + SparseCheckout []string `json:"sparseCheckout,omitempty"` } // GitRepositoryInclude specifies a local reference to a GitRepository which @@ -244,12 +256,12 @@ type GitRepositoryStatus struct { // Artifact represents the last successful GitRepository reconciliation. 
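ExternalArtifact, introduced above, is meant to be written by a source-producing controller rather than by hand; its status.artifact is filled in by that controller. A minimal sketch of what such an object could look like, with a hypothetical producer API group and kind in sourceRef:

---
apiVersion: source.toolkit.fluxcd.io/v1
kind: ExternalArtifact
metadata:
  name: example-artifact        # hypothetical name
  namespace: flux-system
spec:
  # Points back at the custom resource the producing controller reconciles.
  sourceRef:
    apiVersion: example.dev/v1alpha1  # hypothetical producer API
    kind: ReleaseBundle               # hypothetical kind
    name: my-bundle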
// +optional - Artifact *Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` // IncludedArtifacts contains a list of the last successfully included // Artifacts as instructed by GitRepositorySpec.Include. // +optional - IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"` + IncludedArtifacts []*meta.Artifact `json:"includedArtifacts,omitempty"` // ObservedIgnore is the observed exclusion patterns used for constructing // the source artifact. @@ -266,6 +278,11 @@ type GitRepositoryStatus struct { // +optional ObservedInclude []GitRepositoryInclude `json:"observedInclude,omitempty"` + // ObservedSparseCheckout is the observed list of directories used to + // produce the current Artifact. + // +optional + ObservedSparseCheckout []string `json:"observedSparseCheckout,omitempty"` + // SourceVerificationMode is the last used verification mode indicating // which Git object(s) have been verified. // +optional @@ -302,7 +319,7 @@ func (in GitRepository) GetRequeueAfter() time.Duration { // GetArtifact returns the latest Artifact from the GitRepository if present in // the status sub-resource. -func (in *GitRepository) GetArtifact() *Artifact { +func (in *GitRepository) GetArtifact() *meta.Artifact { return in.Status.Artifact } diff --git a/api/v1/helmchart_types.go b/api/v1/helmchart_types.go index 137b16450..23cb24146 100644 --- a/api/v1/helmchart_types.go +++ b/api/v1/helmchart_types.go @@ -149,7 +149,7 @@ type HelmChartStatus struct { // Artifact represents the output of the last successful reconciliation. // +optional - Artifact *Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` meta.ReconcileRequestStatus `json:",inline"` } @@ -182,7 +182,7 @@ func (in HelmChart) GetRequeueAfter() time.Duration { // GetArtifact returns the latest artifact from the source if present in the // status sub-resource. -func (in *HelmChart) GetArtifact() *Artifact { +func (in *HelmChart) GetArtifact() *meta.Artifact { return in.Status.Artifact } diff --git a/api/v1/helmrepository_types.go b/api/v1/helmrepository_types.go index 2a21f2c52..1c19064a5 100644 --- a/api/v1/helmrepository_types.go +++ b/api/v1/helmrepository_types.go @@ -150,7 +150,7 @@ type HelmRepositoryStatus struct { // Artifact represents the last successful HelmRepository reconciliation. // +optional - Artifact *Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` meta.ReconcileRequestStatus `json:",inline"` } @@ -191,7 +191,7 @@ func (in HelmRepository) GetTimeout() time.Duration { // GetArtifact returns the latest artifact from the source if present in the // status sub-resource. -func (in *HelmRepository) GetArtifact() *Artifact { +func (in *HelmRepository) GetArtifact() *meta.Artifact { return in.Status.Artifact } diff --git a/api/v1/ocirepository_types.go b/api/v1/ocirepository_types.go new file mode 100644 index 000000000..8c4d3f0fc --- /dev/null +++ b/api/v1/ocirepository_types.go @@ -0,0 +1,296 @@ +/* +Copyright 2025 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
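The GitRepository changes earlier in this hunk add serviceAccountName (restricted to the 'azure' provider by the new CEL rule) and sparseCheckout. A minimal sketch combining both; the repository URL, directories, and ServiceAccount name are hypothetical:

---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: example-repo            # hypothetical name
  namespace: flux-system
spec:
  url: https://dev.azure.com/example-org/project/_git/repo  # hypothetical URL
  interval: 1m
  ref:
    branch: main
  # New in this change: workload-identity authentication, only accepted
  # together with provider 'azure'.
  provider: azure
  serviceAccountName: git-reader      # hypothetical ServiceAccount
  # New in this change: only these directories are checked out and end up
  # in the produced Artifact (tracked in status.observedSparseCheckout).
  sparseCheckout:
    - deploy
    - charts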
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // OCIRepositoryKind is the string representation of an OCIRepository. + OCIRepositoryKind = "OCIRepository" + + // OCIRepositoryPrefix is the prefix used for OCIRepository URLs. + OCIRepositoryPrefix = "oci://" + + // GenericOCIProvider provides support for authentication using static credentials + // for any OCI compatible API such as Docker Registry, GitHub Container Registry, + // Docker Hub, Quay, etc. + GenericOCIProvider string = "generic" + + // AmazonOCIProvider provides support for OCI authentication using AWS IRSA. + AmazonOCIProvider string = "aws" + + // GoogleOCIProvider provides support for OCI authentication using GCP workload identity. + GoogleOCIProvider string = "gcp" + + // AzureOCIProvider provides support for OCI authentication using a Azure Service Principal, + // Managed Identity or Shared Key. + AzureOCIProvider string = "azure" + + // OCILayerExtract defines the operation type for extracting the content from an OCI artifact layer. + OCILayerExtract = "extract" + + // OCILayerCopy defines the operation type for copying the content from an OCI artifact layer. + OCILayerCopy = "copy" +) + +// OCIRepositorySpec defines the desired state of OCIRepository +type OCIRepositorySpec struct { + // URL is a reference to an OCI artifact repository hosted + // on a remote container registry. + // +kubebuilder:validation:Pattern="^oci://.*$" + // +required + URL string `json:"url"` + + // The OCI reference to pull and monitor for changes, + // defaults to the latest tag. + // +optional + Reference *OCIRepositoryRef `json:"ref,omitempty"` + + // LayerSelector specifies which layer should be extracted from the OCI artifact. + // When not specified, the first layer found in the artifact is selected. + // +optional + LayerSelector *OCILayerSelector `json:"layerSelector,omitempty"` + + // The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + // When not specified, defaults to 'generic'. + // +kubebuilder:validation:Enum=generic;aws;azure;gcp + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` + + // SecretRef contains the secret name containing the registry login + // credentials to resolve image metadata. + // The secret must be of type kubernetes.io/dockerconfigjson. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // Verify contains the secret name containing the trusted public keys + // used to verify the signature and specifies which provider to use to check + // whether OCI image is authentic. + // +optional + Verify *OCIRepositoryVerification `json:"verify,omitempty"` + + // ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + // the image pull if the service account has attached pull secrets. 
For more information: + // https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // registry. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the container registry. + // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + + // Interval at which the OCIRepository URL is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // The timeout for remote OCI Repository operations like pulling, defaults to 60s. + // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. + // +optional + Ignore *string `json:"ignore,omitempty"` + + // Insecure allows connecting to a non-TLS HTTP container registry. + // +optional + Insecure bool `json:"insecure,omitempty"` + + // This flag tells the controller to suspend the reconciliation of this source. + // +optional + Suspend bool `json:"suspend,omitempty"` +} + +// OCIRepositoryRef defines the image reference for the OCIRepository's URL +type OCIRepositoryRef struct { + // Digest is the image digest to pull, takes precedence over SemVer. + // The value should be in the format 'sha256:'. + // +optional + Digest string `json:"digest,omitempty"` + + // SemVer is the range of tags to pull selecting the latest within + // the range, takes precedence over Tag. + // +optional + SemVer string `json:"semver,omitempty"` + + // SemverFilter is a regex pattern to filter the tags within the SemVer range. + // +optional + SemverFilter string `json:"semverFilter,omitempty"` + + // Tag is the image tag to pull, defaults to latest. + // +optional + Tag string `json:"tag,omitempty"` +} + +// OCILayerSelector specifies which layer should be extracted from an OCI Artifact +type OCILayerSelector struct { + // MediaType specifies the OCI media type of the layer + // which should be extracted from the OCI Artifact. The + // first layer matching this type is selected. + // +optional + MediaType string `json:"mediaType,omitempty"` + + // Operation specifies how the selected layer should be processed. + // By default, the layer compressed content is extracted to storage. 
+ // When the operation is set to 'copy', the layer compressed content + // is persisted to storage as it is. + // +kubebuilder:validation:Enum=extract;copy + // +optional + Operation string `json:"operation,omitempty"` +} + +// OCIRepositoryStatus defines the observed state of OCIRepository +type OCIRepositoryStatus struct { + // ObservedGeneration is the last observed generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the OCIRepository. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the download link for the artifact output of the last OCI Repository sync. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful OCI Repository sync. + // +optional + Artifact *meta.Artifact `json:"artifact,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. + // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + // ObservedLayerSelector is the observed layer selector used for constructing + // the source artifact. + // +optional + ObservedLayerSelector *OCILayerSelector `json:"observedLayerSelector,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // OCIPullFailedReason signals that a pull operation failed. + OCIPullFailedReason string = "OCIArtifactPullFailed" + + // OCILayerOperationFailedReason signals that an OCI layer operation failed. + OCILayerOperationFailedReason string = "OCIArtifactLayerOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in OCIRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *OCIRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the OCIRepository must be +// reconciled again. +func (in OCIRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest Artifact from the OCIRepository if present in +// the status sub-resource. +func (in *OCIRepository) GetArtifact() *meta.Artifact { + return in.Status.Artifact +} + +// GetLayerMediaType returns the media type layer selector if found in spec. +func (in *OCIRepository) GetLayerMediaType() string { + if in.Spec.LayerSelector == nil { + return "" + } + + return in.Spec.LayerSelector.MediaType +} + +// GetLayerOperation returns the layer selector operation (defaults to extract). 
+func (in *OCIRepository) GetLayerOperation() string { + if in.Spec.LayerSelector == nil || in.Spec.LayerSelector.Operation == "" { + return OCILayerExtract + } + + return in.Spec.LayerSelector.Operation +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=ocirepo +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// OCIRepository is the Schema for the ocirepositories API +type OCIRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec OCIRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status OCIRepositoryStatus `json:"status,omitempty"` +} + +// OCIRepositoryList contains a list of OCIRepository +// +kubebuilder:object:root=true +type OCIRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []OCIRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&OCIRepository{}, &OCIRepositoryList{}) +} diff --git a/api/v1/source.go b/api/v1/source.go index 83040bc22..d879f6034 100644 --- a/api/v1/source.go +++ b/api/v1/source.go @@ -20,6 +20,8 @@ import ( "time" "k8s.io/apimachinery/pkg/runtime" + + "github.com/fluxcd/pkg/apis/meta" ) const ( @@ -41,5 +43,5 @@ type Source interface { GetRequeueAfter() time.Duration // GetArtifact returns the latest artifact from the source if present in // the status sub-resource. - GetArtifact() *Artifact + GetArtifact() *meta.Artifact } diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 12e537fae..14f1ba3c2 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated /* -Copyright 2024 The Flux authors +Copyright 2025 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -27,34 +27,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Artifact) DeepCopyInto(out *Artifact) { - *out = *in - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) - if in.Size != nil { - in, out := &in.Size, &out.Size - *out = new(int64) - **out = **in - } - if in.Metadata != nil { - in, out := &in.Metadata, &out.Metadata - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact. -func (in *Artifact) DeepCopy() *Artifact { - if in == nil { - return nil - } - out := new(Artifact) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
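With OCIRepository promoted to v1 above, manifests can target the new storage version directly. A minimal sketch exercising ref.semver together with semverFilter and a layerSelector 'copy' operation; the URL, filter regex, and media type are illustrative values, not requirements of this change:

---
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
  name: example-oci             # hypothetical name
  namespace: flux-system
spec:
  url: oci://ghcr.io/example/manifests  # hypothetical repository
  interval: 10m
  ref:
    # The latest tag within the range is selected...
    semver: ">=1.0.0"
    # ...after narrowing the candidate tags with this regex (assumed pattern).
    semverFilter: ".*-rc.*"
  layerSelector:
    mediaType: application/vnd.cncf.flux.content.v1.tar+gzip  # example media type
    # 'copy' persists the selected layer as-is; omitting this defaults to 'extract'.
    operation: copy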
func (in *Bucket) DeepCopyInto(out *Bucket) { *out = *in @@ -197,7 +169,7 @@ func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } if in.ObservedIgnore != nil { @@ -218,6 +190,112 @@ func (in *BucketStatus) DeepCopy() *BucketStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalArtifact) DeepCopyInto(out *ExternalArtifact) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifact. +func (in *ExternalArtifact) DeepCopy() *ExternalArtifact { + if in == nil { + return nil + } + out := new(ExternalArtifact) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalArtifact) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalArtifactList) DeepCopyInto(out *ExternalArtifactList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ExternalArtifact, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactList. +func (in *ExternalArtifactList) DeepCopy() *ExternalArtifactList { + if in == nil { + return nil + } + out := new(ExternalArtifactList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalArtifactList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalArtifactSpec) DeepCopyInto(out *ExternalArtifactSpec) { + *out = *in + if in.SourceRef != nil { + in, out := &in.SourceRef, &out.SourceRef + *out = new(meta.NamespacedObjectKindReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactSpec. +func (in *ExternalArtifactSpec) DeepCopy() *ExternalArtifactSpec { + if in == nil { + return nil + } + out := new(ExternalArtifactSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalArtifactStatus) DeepCopyInto(out *ExternalArtifactStatus) { + *out = *in + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactStatus. 
+func (in *ExternalArtifactStatus) DeepCopy() *ExternalArtifactStatus { + if in == nil { + return nil + } + out := new(ExternalArtifactStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GitRepository) DeepCopyInto(out *GitRepository) { *out = *in @@ -347,6 +425,11 @@ func (in *GitRepositorySpec) DeepCopyInto(out *GitRepositorySpec) { *out = make([]GitRepositoryInclude, len(*in)) copy(*out, *in) } + if in.SparseCheckout != nil { + in, out := &in.SparseCheckout, &out.SparseCheckout + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositorySpec. @@ -371,16 +454,16 @@ func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } if in.IncludedArtifacts != nil { in, out := &in.IncludedArtifacts, &out.IncludedArtifacts - *out = make([]*Artifact, len(*in)) + *out = make([]*meta.Artifact, len(*in)) for i := range *in { if (*in)[i] != nil { in, out := &(*in)[i], &(*out)[i] - *out = new(Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } } @@ -395,6 +478,11 @@ func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) { *out = make([]GitRepositoryInclude, len(*in)) copy(*out, *in) } + if in.ObservedSparseCheckout != nil { + in, out := &in.ObservedSparseCheckout, &out.ObservedSparseCheckout + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.SourceVerificationMode != nil { in, out := &in.SourceVerificationMode, &out.SourceVerificationMode *out = new(GitVerificationMode) @@ -532,7 +620,7 @@ func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } out.ReconcileRequestStatus = in.ReconcileRequestStatus @@ -655,7 +743,7 @@ func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } out.ReconcileRequestStatus = in.ReconcileRequestStatus @@ -686,6 +774,189 @@ func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReferen return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCILayerSelector) DeepCopyInto(out *OCILayerSelector) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCILayerSelector. +func (in *OCILayerSelector) DeepCopy() *OCILayerSelector { + if in == nil { + return nil + } + out := new(OCILayerSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepository) DeepCopyInto(out *OCIRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepository. 
+func (in *OCIRepository) DeepCopy() *OCIRepository { + if in == nil { + return nil + } + out := new(OCIRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OCIRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepositoryList) DeepCopyInto(out *OCIRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OCIRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryList. +func (in *OCIRepositoryList) DeepCopy() *OCIRepositoryList { + if in == nil { + return nil + } + out := new(OCIRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OCIRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepositoryRef) DeepCopyInto(out *OCIRepositoryRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryRef. +func (in *OCIRepositoryRef) DeepCopy() *OCIRepositoryRef { + if in == nil { + return nil + } + out := new(OCIRepositoryRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) { + *out = *in + if in.Reference != nil { + in, out := &in.Reference, &out.Reference + *out = new(OCIRepositoryRef) + **out = **in + } + if in.LayerSelector != nil { + in, out := &in.LayerSelector, &out.LayerSelector + *out = new(OCILayerSelector) + **out = **in + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.Verify != nil { + in, out := &in.Verify, &out.Verify + *out = new(OCIRepositoryVerification) + (*in).DeepCopyInto(*out) + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositorySpec. +func (in *OCIRepositorySpec) DeepCopy() *OCIRepositorySpec { + if in == nil { + return nil + } + out := new(OCIRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(meta.Artifact) + (*in).DeepCopyInto(*out) + } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + if in.ObservedLayerSelector != nil { + in, out := &in.ObservedLayerSelector, &out.ObservedLayerSelector + *out = new(OCILayerSelector) + **out = **in + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryStatus. +func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus { + if in == nil { + return nil + } + out := new(OCIRepositoryStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OCIRepositoryVerification) DeepCopyInto(out *OCIRepositoryVerification) { *out = *in diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 639a0bbe0..e64321c9d 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -194,12 +194,7 @@ func (in *Bucket) GetInterval() metav1.Duration { // +genclient // +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:deprecatedversion:warning="v1beta1 Bucket is deprecated, upgrade to v1" -// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:skipversion // Bucket is the Schema for the buckets API type Bucket struct { diff --git a/api/v1beta1/doc.go b/api/v1beta1/doc.go index 7a768a45d..f604a2624 100644 --- a/api/v1beta1/doc.go +++ b/api/v1beta1/doc.go @@ -15,6 +15,9 @@ limitations under the License. */ // Package v1beta1 contains API Schema definitions for the source v1beta1 API group +// +// Deprecated: v1beta1 is no longer supported, use v1 instead. 
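Given the deprecation note and the +kubebuilder:skipversion markers above, v1beta1 objects should move to the v1 API. For specs whose fields already exist in v1, the migration is typically just the apiVersion, sketched below with hypothetical names; kind-specific field changes are outside the scope of this diff:

---
# previously: apiVersion: source.toolkit.fluxcd.io/v1beta1
apiVersion: source.toolkit.fluxcd.io/v1
kind: Bucket
metadata:
  name: example-bucket-migrated # hypothetical name
  namespace: flux-system
spec:
  bucketName: example-artifacts # hypothetical bucket
  endpoint: s3.amazonaws.com
  interval: 5m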
+// // +kubebuilder:object:generate=true // +groupName=source.toolkit.fluxcd.io package v1beta1 diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index 8a4c46fe8..05cce7c60 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -267,12 +267,7 @@ func (in *GitRepository) GetInterval() metav1.Duration { // +genclient // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=gitrepo -// +kubebuilder:subresource:status -// +kubebuilder:deprecatedversion:warning="v1beta1 GitRepository is deprecated, upgrade to v1" -// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:skipversion // GitRepository is the Schema for the gitrepositories API type GitRepository struct { diff --git a/api/v1beta1/helmchart_types.go b/api/v1beta1/helmchart_types.go index d4c1564cc..22e5dda58 100644 --- a/api/v1beta1/helmchart_types.go +++ b/api/v1beta1/helmchart_types.go @@ -233,15 +233,7 @@ func (in *HelmChart) GetValuesFiles() []string { // +genclient // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=hc -// +kubebuilder:subresource:status -// +kubebuilder:deprecatedversion:warning="v1beta1 HelmChart is deprecated, upgrade to v1" -// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart` -// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version` -// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind` -// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:skipversion // HelmChart is the Schema for the helmcharts API type HelmChart struct { diff --git a/api/v1beta1/helmrepository_types.go b/api/v1beta1/helmrepository_types.go index fe0ed7124..4530b82a9 100644 --- a/api/v1beta1/helmrepository_types.go +++ b/api/v1beta1/helmrepository_types.go @@ -183,12 +183,7 @@ func (in *HelmRepository) GetInterval() metav1.Duration { // +genclient // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=helmrepo -// +kubebuilder:subresource:status -// +kubebuilder:deprecatedversion:warning="v1beta1 HelmRepository is deprecated, upgrade to v1" -// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` -// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:skipversion // HelmRepository is the Schema for the helmrepositories API type HelmRepository struct { diff --git a/api/v1beta1/zz_generated.deepcopy.go 
b/api/v1beta1/zz_generated.deepcopy.go index a600106ea..10be7301e 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated /* -Copyright 2024 The Flux authors +Copyright 2025 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go index d18fc76f7..6495abdd0 100644 --- a/api/v1beta2/bucket_types.go +++ b/api/v1beta2/bucket_types.go @@ -229,7 +229,7 @@ type BucketStatus struct { // Artifact represents the last successful Bucket reconciliation. // +optional - Artifact *apiv1.Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` // ObservedIgnore is the observed exclusion patterns used for constructing // the source artifact. @@ -265,7 +265,7 @@ func (in Bucket) GetRequeueAfter() time.Duration { } // GetArtifact returns the latest artifact from the source if present in the status sub-resource. -func (in *Bucket) GetArtifact() *apiv1.Artifact { +func (in *Bucket) GetArtifact() *meta.Artifact { return in.Status.Artifact } diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go index 2e8685cda..89beeb9a7 100644 --- a/api/v1beta2/gitrepository_types.go +++ b/api/v1beta2/gitrepository_types.go @@ -23,8 +23,6 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" - - apiv1 "github.com/fluxcd/source-controller/api/v1" ) const ( @@ -214,12 +212,12 @@ type GitRepositoryStatus struct { // Artifact represents the last successful GitRepository reconciliation. // +optional - Artifact *apiv1.Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` // IncludedArtifacts contains a list of the last successfully included // Artifacts as instructed by GitRepositorySpec.Include. // +optional - IncludedArtifacts []*apiv1.Artifact `json:"includedArtifacts,omitempty"` + IncludedArtifacts []*meta.Artifact `json:"includedArtifacts,omitempty"` // ContentConfigChecksum is a checksum of all the configurations related to // the content of the source artifact: @@ -282,7 +280,7 @@ func (in GitRepository) GetRequeueAfter() time.Duration { // GetArtifact returns the latest Artifact from the GitRepository if present in // the status sub-resource. -func (in *GitRepository) GetArtifact() *apiv1.Artifact { +func (in *GitRepository) GetArtifact() *meta.Artifact { return in.Status.Artifact } diff --git a/api/v1beta2/helmchart_types.go b/api/v1beta2/helmchart_types.go index 6bc7875a8..ac24b1c13 100644 --- a/api/v1beta2/helmchart_types.go +++ b/api/v1beta2/helmchart_types.go @@ -166,7 +166,7 @@ type HelmChartStatus struct { // Artifact represents the output of the last successful reconciliation. // +optional - Artifact *apiv1.Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` meta.ReconcileRequestStatus `json:",inline"` } @@ -199,7 +199,7 @@ func (in HelmChart) GetRequeueAfter() time.Duration { // GetArtifact returns the latest artifact from the source if present in the // status sub-resource. 
-func (in *HelmChart) GetArtifact() *apiv1.Artifact { +func (in *HelmChart) GetArtifact() *meta.Artifact { return in.Status.Artifact } diff --git a/api/v1beta2/helmrepository_types.go b/api/v1beta2/helmrepository_types.go index 0a618b88b..56cbd928c 100644 --- a/api/v1beta2/helmrepository_types.go +++ b/api/v1beta2/helmrepository_types.go @@ -23,8 +23,6 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" - - apiv1 "github.com/fluxcd/source-controller/api/v1" ) const ( @@ -152,7 +150,7 @@ type HelmRepositoryStatus struct { // Artifact represents the last successful HelmRepository reconciliation. // +optional - Artifact *apiv1.Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` meta.ReconcileRequestStatus `json:",inline"` } @@ -193,7 +191,7 @@ func (in HelmRepository) GetTimeout() time.Duration { // GetArtifact returns the latest artifact from the source if present in the // status sub-resource. -func (in *HelmRepository) GetArtifact() *apiv1.Artifact { +func (in *HelmRepository) GetArtifact() *meta.Artifact { return in.Status.Artifact } diff --git a/api/v1beta2/ocirepository_types.go b/api/v1beta2/ocirepository_types.go index 9030fab74..760f0d8f1 100644 --- a/api/v1beta2/ocirepository_types.go +++ b/api/v1beta2/ocirepository_types.go @@ -205,7 +205,7 @@ type OCIRepositoryStatus struct { // Artifact represents the output of the last successful OCI Repository sync. // +optional - Artifact *apiv1.Artifact `json:"artifact,omitempty"` + Artifact *meta.Artifact `json:"artifact,omitempty"` // ContentConfigChecksum is a checksum of all the configurations related to // the content of the source artifact: @@ -260,7 +260,7 @@ func (in OCIRepository) GetRequeueAfter() time.Duration { // GetArtifact returns the latest Artifact from the OCIRepository if present in // the status sub-resource. -func (in *OCIRepository) GetArtifact() *apiv1.Artifact { +func (in *OCIRepository) GetArtifact() *meta.Artifact { return in.Status.Artifact } @@ -283,10 +283,10 @@ func (in *OCIRepository) GetLayerOperation() string { } // +genclient -// +kubebuilder:storageversion // +kubebuilder:object:root=true // +kubebuilder:resource:shortName=ocirepo // +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 OCIRepository is deprecated, upgrade to v1" // +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index 354bceefb..0b874dd7e 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated /* -Copyright 2024 The Flux authors +Copyright 2025 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -203,7 +203,7 @@ func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(apiv1.Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } if in.ObservedIgnore != nil { @@ -377,16 +377,16 @@ func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(apiv1.Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } if in.IncludedArtifacts != nil { in, out := &in.IncludedArtifacts, &out.IncludedArtifacts - *out = make([]*apiv1.Artifact, len(*in)) + *out = make([]*meta.Artifact, len(*in)) for i := range *in { if (*in)[i] != nil { in, out := &(*in)[i], &(*out)[i] - *out = new(apiv1.Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } } @@ -538,7 +538,7 @@ func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(apiv1.Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } out.ReconcileRequestStatus = in.ReconcileRequestStatus @@ -661,7 +661,7 @@ func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(apiv1.Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } out.ReconcileRequestStatus = in.ReconcileRequestStatus @@ -849,7 +849,7 @@ func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) { } if in.Artifact != nil { in, out := &in.Artifact, &out.Artifact - *out = new(apiv1.Artifact) + *out = new(meta.Artifact) (*in).DeepCopyInto(*out) } if in.ObservedIgnore != nil { diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index 3d8f812cc..f578c8da0 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.19.0 name: buckets.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -142,6 +142,13 @@ spec: required: - name type: object + serviceAccountName: + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + the bucket. This field is only supported for the 'gcp' and 'aws' providers. + For more information about workload identity: + https://fluxcd.io/flux/components/source/buckets/#workload-identity + type: string sts: description: |- STS specifies the required configuration to use a Security Token @@ -232,6 +239,11 @@ spec: rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.secretRef)' - message: spec.sts.certSecretRef is not required for the 'aws' STS provider rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.certSecretRef)' + - message: ServiceAccountName is not supported for the 'generic' Bucket + provider + rule: self.provider != 'generic' || !has(self.serviceAccountName) + - message: cannot set both .spec.secretRef and .spec.serviceAccountName + rule: '!has(self.secretRef) || !has(self.serviceAccountName)' status: default: observedGeneration: -1 @@ -277,6 +289,7 @@ spec: consumption, e.g. by another controller applying the Artifact contents. 
type: string required: + - digest - lastUpdateTime - path - revision @@ -367,238 +380,6 @@ spec: storage: true subresources: status: {} - - additionalPrinterColumns: - - jsonPath: .spec.endpoint - name: Endpoint - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].message - name: Status - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - deprecated: true - deprecationWarning: v1beta1 Bucket is deprecated, upgrade to v1 - name: v1beta1 - schema: - openAPIV3Schema: - description: Bucket is the Schema for the buckets API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BucketSpec defines the desired state of an S3 compatible - bucket - properties: - accessFrom: - description: AccessFrom defines an Access Control List for allowing - cross-namespace references to this object. - properties: - namespaceSelectors: - description: |- - NamespaceSelectors is the list of namespace selectors to which this ACL applies. - Items in this list are evaluated using a logical OR operation. - items: - description: |- - NamespaceSelector selects the namespaces to which this ACL applies. - An empty map of MatchLabels matches all namespaces in a cluster. - properties: - matchLabels: - additionalProperties: - type: string - description: |- - MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - type: array - required: - - namespaceSelectors - type: object - bucketName: - description: The bucket name. - type: string - endpoint: - description: The bucket endpoint address. - type: string - ignore: - description: |- - Ignore overrides the set of excluded patterns in the .sourceignore format - (which is the same as .gitignore). If not provided, a default will be used, - consult the documentation for your version to find out what those are. - type: string - insecure: - description: Insecure allows connecting to a non-TLS S3 HTTP endpoint. - type: boolean - interval: - description: The interval at which to check for bucket updates. - type: string - provider: - default: generic - description: The S3 compatible storage provider name, default ('generic'). - enum: - - generic - - aws - - gcp - type: string - region: - description: The bucket region. - type: string - secretRef: - description: |- - The name of the secret containing authentication credentials - for the Bucket. - properties: - name: - description: Name of the referent. 
- type: string - required: - - name - type: object - suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. - type: boolean - timeout: - default: 60s - description: The timeout for download operations, defaults to 60s. - type: string - required: - - bucketName - - endpoint - - interval - type: object - status: - default: - observedGeneration: -1 - description: BucketStatus defines the observed state of a bucket - properties: - artifact: - description: Artifact represents the output of the last successful - Bucket sync. - properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. - type: string - lastUpdateTime: - description: |- - LastUpdateTime is the timestamp corresponding to the last update of this - artifact. - format: date-time - type: string - path: - description: Path is the relative file path of this artifact. - type: string - revision: - description: |- - Revision is a human readable identifier traceable in the origin source - system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm - chart version, etc. - type: string - url: - description: URL is the HTTP address of this artifact. - type: string - required: - - lastUpdateTime - - path - - url - type: object - conditions: - description: Conditions holds the conditions for the Bucket. - items: - description: Condition contains details for one aspect of the current - state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - lastHandledReconcileAt: - description: |- - LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value - can be detected. 
- type: string - observedGeneration: - description: ObservedGeneration is the last observed generation. - format: int64 - type: integer - url: - description: URL is the download link for the artifact output of the - last Bucket sync. - type: string - type: object - type: object - served: true - storage: false - subresources: - status: {} - additionalPrinterColumns: - jsonPath: .spec.endpoint name: Endpoint @@ -892,6 +673,7 @@ spec: consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision diff --git a/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml new file mode 100644 index 000000000..23cdf63c3 --- /dev/null +++ b/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml @@ -0,0 +1,191 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: externalartifacts.source.toolkit.fluxcd.io +spec: + group: source.toolkit.fluxcd.io + names: + kind: ExternalArtifact + listKind: ExternalArtifactList + plural: externalartifacts + singular: externalartifact + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .spec.sourceRef.name + name: Source + type: string + name: v1 + schema: + openAPIV3Schema: + description: ExternalArtifact is the Schema for the external artifacts API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExternalArtifactSpec defines the desired state of ExternalArtifact + properties: + sourceRef: + description: |- + SourceRef points to the Kubernetes custom resource for + which the artifact is generated. + properties: + apiVersion: + description: API version of the referent, if not specified the + Kubernetes preferred version will be used. + type: string + kind: + description: Kind of the referent. + type: string + name: + description: Name of the referent. + type: string + namespace: + description: Namespace of the referent, when not specified it + acts as LocalObjectReference. + type: string + required: + - kind + - name + type: object + type: object + status: + description: ExternalArtifactStatus defines the observed state of ExternalArtifact + properties: + artifact: + description: Artifact represents the output of an ExternalArtifact + reconciliation. + properties: + digest: + description: Digest is the digest of the file in the form of ':'. 
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. + format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. + type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + conditions: + description: Conditions holds the conditions for the ExternalArtifact. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index 0e37a7b49..10663e473 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.19.0 name: gitrepositories.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -174,6 +174,19 @@ spec: required: - name type: object + serviceAccountName: + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to + authenticate to the GitRepository. This field is only supported for 'azure' provider. + type: string + sparseCheckout: + description: |- + SparseCheckout specifies a list of directories to checkout when cloning + the repository. If specified, only these directories are included in the + Artifact produced for this GitRepository. + items: + type: string + type: array suspend: description: |- Suspend tells the controller to suspend the reconciliation of this @@ -227,6 +240,10 @@ spec: - interval - url type: object + x-kubernetes-validations: + - message: serviceAccountName can only be set when provider is 'azure' + rule: '!has(self.serviceAccountName) || (has(self.provider) && self.provider + == ''azure'')' status: default: observedGeneration: -1 @@ -273,6 +290,7 @@ spec: consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision @@ -381,6 +399,7 @@ spec: consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision @@ -443,6 +462,13 @@ spec: ObservedRecurseSubmodules is the observed resource submodules configuration used to produce the current Artifact. type: boolean + observedSparseCheckout: + description: |- + ObservedSparseCheckout is the observed list of directories used to + produce the current Artifact. + items: + type: string + type: array sourceVerificationMode: description: |- SourceVerificationMode is the last used verification mode indicating @@ -454,343 +480,6 @@ spec: storage: true subresources: status: {} - - additionalPrinterColumns: - - jsonPath: .spec.url - name: URL - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].message - name: Status - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - deprecated: true - deprecationWarning: v1beta1 GitRepository is deprecated, upgrade to v1 - name: v1beta1 - schema: - openAPIV3Schema: - description: GitRepository is the Schema for the gitrepositories API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: GitRepositorySpec defines the desired state of a Git repository. - properties: - accessFrom: - description: AccessFrom defines an Access Control List for allowing - cross-namespace references to this object. - properties: - namespaceSelectors: - description: |- - NamespaceSelectors is the list of namespace selectors to which this ACL applies. - Items in this list are evaluated using a logical OR operation. - items: - description: |- - NamespaceSelector selects the namespaces to which this ACL applies. - An empty map of MatchLabels matches all namespaces in a cluster. - properties: - matchLabels: - additionalProperties: - type: string - description: |- - MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - type: array - required: - - namespaceSelectors - type: object - gitImplementation: - default: go-git - description: |- - Determines which git client library to use. - Defaults to go-git, valid values are ('go-git', 'libgit2'). - enum: - - go-git - - libgit2 - type: string - ignore: - description: |- - Ignore overrides the set of excluded patterns in the .sourceignore format - (which is the same as .gitignore). If not provided, a default will be used, - consult the documentation for your version to find out what those are. - type: string - include: - description: Extra git repositories to map into the repository - items: - description: GitRepositoryInclude defines a source with a from and - to path. - properties: - fromPath: - description: The path to copy contents from, defaults to the - root directory. - type: string - repository: - description: Reference to a GitRepository to include. - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - toPath: - description: The path to copy contents to, defaults to the name - of the source ref. - type: string - required: - - repository - type: object - type: array - interval: - description: The interval at which to check for repository updates. - type: string - recurseSubmodules: - description: |- - When enabled, after the clone is created, initializes all submodules within, - using their default settings. - This option is available only when using the 'go-git' GitImplementation. - type: boolean - ref: - description: |- - The Git reference to checkout and monitor for changes, defaults to - master branch. - properties: - branch: - description: The Git branch to checkout, defaults to master. - type: string - commit: - description: The Git commit SHA to checkout, if specified Tag - filters will be ignored. - type: string - semver: - description: The Git tag semver expression, takes precedence over - Tag. - type: string - tag: - description: The Git tag to checkout, takes precedence over Branch. 
- type: string - type: object - secretRef: - description: |- - The secret name containing the Git credentials. - For HTTPS repositories the secret must contain username and password - fields. - For SSH repositories the secret must contain identity and known_hosts - fields. - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. - type: boolean - timeout: - default: 60s - description: The timeout for remote Git operations like cloning, defaults - to 60s. - type: string - url: - description: The repository URL, can be a HTTP/S or SSH address. - pattern: ^(http|https|ssh)://.*$ - type: string - verify: - description: Verify OpenPGP signature for the Git commit HEAD points - to. - properties: - mode: - description: Mode describes what git object should be verified, - currently ('head'). - enum: - - head - type: string - secretRef: - description: The secret name containing the public keys of all - trusted Git authors. - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - required: - - mode - type: object - required: - - interval - - url - type: object - status: - default: - observedGeneration: -1 - description: GitRepositoryStatus defines the observed state of a Git repository. - properties: - artifact: - description: Artifact represents the output of the last successful - repository sync. - properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. - type: string - lastUpdateTime: - description: |- - LastUpdateTime is the timestamp corresponding to the last update of this - artifact. - format: date-time - type: string - path: - description: Path is the relative file path of this artifact. - type: string - revision: - description: |- - Revision is a human readable identifier traceable in the origin source - system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm - chart version, etc. - type: string - url: - description: URL is the HTTP address of this artifact. - type: string - required: - - lastUpdateTime - - path - - url - type: object - conditions: - description: Conditions holds the conditions for the GitRepository. - items: - description: Condition contains details for one aspect of the current - state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. 
- Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - includedArtifacts: - description: IncludedArtifacts represents the included artifacts from - the last successful repository sync. - items: - description: Artifact represents the output of a source synchronisation. - properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. - type: string - lastUpdateTime: - description: |- - LastUpdateTime is the timestamp corresponding to the last update of this - artifact. - format: date-time - type: string - path: - description: Path is the relative file path of this artifact. - type: string - revision: - description: |- - Revision is a human readable identifier traceable in the origin source - system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm - chart version, etc. - type: string - url: - description: URL is the HTTP address of this artifact. - type: string - required: - - lastUpdateTime - - path - - url - type: object - type: array - lastHandledReconcileAt: - description: |- - LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value - can be detected. - type: string - observedGeneration: - description: ObservedGeneration is the last observed generation. - format: int64 - type: integer - url: - description: |- - URL is the download link for the artifact output of the last repository - sync. - type: string - type: object - type: object - served: true - storage: false - subresources: - status: {} - additionalPrinterColumns: - jsonPath: .spec.url name: URL @@ -1059,6 +748,7 @@ spec: consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision @@ -1182,6 +872,7 @@ spec: consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index 26e5a7e97..0e57c72a5 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.19.0 name: helmcharts.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -244,6 +244,7 @@ spec: consumption, e.g. by another controller applying the Artifact contents. 
type: string required: + - digest - lastUpdateTime - path - revision @@ -348,263 +349,6 @@ spec: storage: true subresources: status: {} - - additionalPrinterColumns: - - jsonPath: .spec.chart - name: Chart - type: string - - jsonPath: .spec.version - name: Version - type: string - - jsonPath: .spec.sourceRef.kind - name: Source Kind - type: string - - jsonPath: .spec.sourceRef.name - name: Source Name - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].message - name: Status - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - deprecated: true - deprecationWarning: v1beta1 HelmChart is deprecated, upgrade to v1 - name: v1beta1 - schema: - openAPIV3Schema: - description: HelmChart is the Schema for the helmcharts API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: HelmChartSpec defines the desired state of a Helm chart. - properties: - accessFrom: - description: AccessFrom defines an Access Control List for allowing - cross-namespace references to this object. - properties: - namespaceSelectors: - description: |- - NamespaceSelectors is the list of namespace selectors to which this ACL applies. - Items in this list are evaluated using a logical OR operation. - items: - description: |- - NamespaceSelector selects the namespaces to which this ACL applies. - An empty map of MatchLabels matches all namespaces in a cluster. - properties: - matchLabels: - additionalProperties: - type: string - description: |- - MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - type: array - required: - - namespaceSelectors - type: object - chart: - description: The name or path the Helm chart is available at in the - SourceRef. - type: string - interval: - description: The interval at which to check the Source for updates. - type: string - reconcileStrategy: - default: ChartVersion - description: |- - Determines what enables the creation of a new artifact. Valid values are - ('ChartVersion', 'Revision'). - See the documentation of the values for an explanation on their behavior. - Defaults to ChartVersion when omitted. - enum: - - ChartVersion - - Revision - type: string - sourceRef: - description: The reference to the Source the chart is available at. - properties: - apiVersion: - description: APIVersion of the referent. - type: string - kind: - description: |- - Kind of the referent, valid values are ('HelmRepository', 'GitRepository', - 'Bucket'). 
- enum: - - HelmRepository - - GitRepository - - Bucket - type: string - name: - description: Name of the referent. - type: string - required: - - kind - - name - type: object - suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. - type: boolean - valuesFile: - description: |- - Alternative values file to use as the default chart values, expected to - be a relative path in the SourceRef. Deprecated in favor of ValuesFiles, - for backwards compatibility the file defined here is merged before the - ValuesFiles items. Ignored when omitted. - type: string - valuesFiles: - description: |- - Alternative list of values files to use as the chart values (values.yaml - is not included by default), expected to be a relative path in the SourceRef. - Values files are merged in the order of this list with the last file overriding - the first. Ignored when omitted. - items: - type: string - type: array - version: - default: '*' - description: |- - The chart version semver expression, ignored for charts from GitRepository - and Bucket sources. Defaults to latest when omitted. - type: string - required: - - chart - - interval - - sourceRef - type: object - status: - default: - observedGeneration: -1 - description: HelmChartStatus defines the observed state of the HelmChart. - properties: - artifact: - description: Artifact represents the output of the last successful - chart sync. - properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. - type: string - lastUpdateTime: - description: |- - LastUpdateTime is the timestamp corresponding to the last update of this - artifact. - format: date-time - type: string - path: - description: Path is the relative file path of this artifact. - type: string - revision: - description: |- - Revision is a human readable identifier traceable in the origin source - system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm - chart version, etc. - type: string - url: - description: URL is the HTTP address of this artifact. - type: string - required: - - lastUpdateTime - - path - - url - type: object - conditions: - description: Conditions holds the conditions for the HelmChart. - items: - description: Condition contains details for one aspect of the current - state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. 
- This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - lastHandledReconcileAt: - description: |- - LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value - can be detected. - type: string - observedGeneration: - description: ObservedGeneration is the last observed generation. - format: int64 - type: integer - url: - description: URL is the download link for the last chart pulled. - type: string - type: object - type: object - served: true - storage: false - subresources: - status: {} - additionalPrinterColumns: - jsonPath: .spec.chart name: Chart @@ -870,6 +614,7 @@ spec: consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml index a42f54fa6..750a36500 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.19.0 name: helmrepositories.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -232,6 +232,7 @@ spec: consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision @@ -318,226 +319,6 @@ spec: storage: true subresources: status: {} - - additionalPrinterColumns: - - jsonPath: .spec.url - name: URL - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].status - name: Ready - type: string - - jsonPath: .status.conditions[?(@.type=="Ready")].message - name: Status - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - deprecated: true - deprecationWarning: v1beta1 HelmRepository is deprecated, upgrade to v1 - name: v1beta1 - schema: - openAPIV3Schema: - description: HelmRepository is the Schema for the helmrepositories API - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: HelmRepositorySpec defines the reference to a Helm repository. 
- properties: - accessFrom: - description: AccessFrom defines an Access Control List for allowing - cross-namespace references to this object. - properties: - namespaceSelectors: - description: |- - NamespaceSelectors is the list of namespace selectors to which this ACL applies. - Items in this list are evaluated using a logical OR operation. - items: - description: |- - NamespaceSelector selects the namespaces to which this ACL applies. - An empty map of MatchLabels matches all namespaces in a cluster. - properties: - matchLabels: - additionalProperties: - type: string - description: |- - MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - type: array - required: - - namespaceSelectors - type: object - interval: - description: The interval at which to check the upstream for updates. - type: string - passCredentials: - description: |- - PassCredentials allows the credentials from the SecretRef to be passed on to - a host that does not match the host as defined in URL. - This may be required if the host of the advertised chart URLs in the index - differ from the defined URL. - Enabling this should be done with caution, as it can potentially result in - credentials getting stolen in a MITM-attack. - type: boolean - secretRef: - description: |- - The name of the secret containing authentication credentials for the Helm - repository. - For HTTP/S basic auth the secret must contain username and - password fields. - For TLS the secret must contain a certFile and keyFile, and/or - caFile fields. - properties: - name: - description: Name of the referent. - type: string - required: - - name - type: object - suspend: - description: This flag tells the controller to suspend the reconciliation - of this source. - type: boolean - timeout: - default: 60s - description: The timeout of index downloading, defaults to 60s. - type: string - url: - description: The Helm repository URL, a valid URL contains at least - a protocol and host. - type: string - required: - - interval - - url - type: object - status: - default: - observedGeneration: -1 - description: HelmRepositoryStatus defines the observed state of the HelmRepository. - properties: - artifact: - description: Artifact represents the output of the last successful - repository sync. - properties: - checksum: - description: Checksum is the SHA256 checksum of the artifact. - type: string - lastUpdateTime: - description: |- - LastUpdateTime is the timestamp corresponding to the last update of this - artifact. - format: date-time - type: string - path: - description: Path is the relative file path of this artifact. - type: string - revision: - description: |- - Revision is a human readable identifier traceable in the origin source - system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm - chart version, etc. - type: string - url: - description: URL is the HTTP address of this artifact. - type: string - required: - - lastUpdateTime - - path - - url - type: object - conditions: - description: Conditions holds the conditions for the HelmRepository. - items: - description: Condition contains details for one aspect of the current - state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. 
- This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - lastHandledReconcileAt: - description: |- - LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change of the annotation value - can be detected. - type: string - observedGeneration: - description: ObservedGeneration is the last observed generation. - format: int64 - type: integer - url: - description: URL is the download link for the last index fetched. - type: string - type: object - type: object - served: true - storage: false - subresources: - status: {} - additionalPrinterColumns: - jsonPath: .spec.url name: URL @@ -756,6 +537,7 @@ spec: consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision diff --git a/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml index a60b7b416..05b7b96ab 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.19.0 name: ocirepositories.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -29,6 +29,401 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date + name: v1 + schema: + openAPIV3Schema: + description: OCIRepository is the Schema for the ocirepositories API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: OCIRepositorySpec defines the desired state of OCIRepository + properties: + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + and whichever are supplied, will be used for connecting to the + registry. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + ignore: + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. + type: string + insecure: + description: Insecure allows connecting to a non-TLS HTTP container + registry. + type: boolean + interval: + description: |- + Interval at which the OCIRepository URL is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + layerSelector: + description: |- + LayerSelector specifies which layer should be extracted from the OCI artifact. + When not specified, the first layer found in the artifact is selected. + properties: + mediaType: + description: |- + MediaType specifies the OCI media type of the layer + which should be extracted from the OCI Artifact. The + first layer matching this type is selected. + type: string + operation: + description: |- + Operation specifies how the selected layer should be processed. + By default, the layer compressed content is extracted to storage. + When the operation is set to 'copy', the layer compressed content + is persisted to storage as it is. + enum: + - extract + - copy + type: string + type: object + provider: + default: generic + description: |- + The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + When not specified, defaults to 'generic'. + enum: + - generic + - aws + - azure + - gcp + type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the container registry. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + ref: + description: |- + The OCI reference to pull and monitor for changes, + defaults to the latest tag. + properties: + digest: + description: |- + Digest is the image digest to pull, takes precedence over SemVer. + The value should be in the format 'sha256:'. + type: string + semver: + description: |- + SemVer is the range of tags to pull selecting the latest within + the range, takes precedence over Tag. 
+ type: string + semverFilter: + description: SemverFilter is a regex pattern to filter the tags + within the SemVer range. + type: string + tag: + description: Tag is the image tag to pull, defaults to latest. + type: string + type: object + secretRef: + description: |- + SecretRef contains the secret name containing the registry login + credentials to resolve image metadata. + The secret must be of type kubernetes.io/dockerconfigjson. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + serviceAccountName: + description: |- + ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate + the image pull if the service account has attached pull secrets. For more information: + https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account + type: string + suspend: + description: This flag tells the controller to suspend the reconciliation + of this source. + type: boolean + timeout: + default: 60s + description: The timeout for remote OCI Repository operations like + pulling, defaults to 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ + type: string + url: + description: |- + URL is a reference to an OCI artifact repository hosted + on a remote container registry. + pattern: ^oci://.*$ + type: string + verify: + description: |- + Verify contains the secret name containing the trusted public keys + used to verify the signature and specifies which provider to use to check + whether OCI image is authentic. + properties: + matchOIDCIdentity: + description: |- + MatchOIDCIdentity specifies the identity matching criteria to use + while verifying an OCI artifact which was signed using Cosign keyless + signing. The artifact's identity is deemed to be verified if any of the + specified matchers match against the identity. + items: + description: |- + OIDCIdentityMatch specifies options for verifying the certificate identity, + i.e. the issuer and the subject of the certificate. + properties: + issuer: + description: |- + Issuer specifies the regex pattern to match against to verify + the OIDC issuer in the Fulcio certificate. The pattern must be a + valid Go regular expression. + type: string + subject: + description: |- + Subject specifies the regex pattern to match against to verify + the identity subject in the Fulcio certificate. The pattern must + be a valid Go regular expression. + type: string + required: + - issuer + - subject + type: object + type: array + provider: + default: cosign + description: Provider specifies the technology used to sign the + OCI Artifact. + enum: + - cosign + - notation + type: string + secretRef: + description: |- + SecretRef specifies the Kubernetes Secret containing the + trusted public keys. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - provider + type: object + required: + - interval + - url + type: object + status: + default: + observedGeneration: -1 + description: OCIRepositoryStatus defines the observed state of OCIRepository + properties: + artifact: + description: Artifact represents the output of the last successful + OCI Repository sync. + properties: + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. 
+ format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. + type: string + required: + - digest + - lastUpdateTime + - path + - revision + - url + type: object + conditions: + description: Conditions holds the conditions for the OCIRepository. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastHandledReconcileAt: + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. 
+ type: string + observedLayerSelector: + description: |- + ObservedLayerSelector is the observed layer selector used for constructing + the source artifact. + properties: + mediaType: + description: |- + MediaType specifies the OCI media type of the layer + which should be extracted from the OCI Artifact. The + first layer matching this type is selected. + type: string + operation: + description: |- + Operation specifies how the selected layer should be processed. + By default, the layer compressed content is extracted to storage. + When the operation is set to 'copy', the layer compressed content + is persisted to storage as it is. + enum: + - extract + - copy + type: string + type: object + url: + description: URL is the download link for the artifact output of the + last OCI Repository sync. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.url + name: URL + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + deprecated: true + deprecationWarning: v1beta2 OCIRepository is deprecated, upgrade to v1 name: v1beta2 schema: openAPIV3Schema: @@ -302,6 +697,7 @@ spec: consumption, e.g. by another controller applying the Artifact contents. type: string required: + - digest - lastUpdateTime - path - revision @@ -422,6 +818,6 @@ spec: type: object type: object served: true - storage: true + storage: false subresources: status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index c00716353..2a09dbfd5 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -6,4 +6,5 @@ resources: - bases/source.toolkit.fluxcd.io_helmcharts.yaml - bases/source.toolkit.fluxcd.io_buckets.yaml - bases/source.toolkit.fluxcd.io_ocirepositories.yaml +- bases/source.toolkit.fluxcd.io_externalartifacts.yaml # +kubebuilder:scaffold:crdkustomizeresource diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index c61571390..0118ce85b 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -6,4 +6,4 @@ resources: images: - name: fluxcd/source-controller newName: fluxcd/source-controller - newTag: v1.5.0 + newTag: v1.7.0 diff --git a/config/rbac/externalartifact_editor_role.yaml b/config/rbac/externalartifact_editor_role.yaml new file mode 100644 index 000000000..ded6c1d93 --- /dev/null +++ b/config/rbac/externalartifact_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit externalartifacts. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: externalartifact-editor-role +rules: +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts/status + verbs: + - get diff --git a/config/rbac/externalartifact_viewer_role.yaml b/config/rbac/externalartifact_viewer_role.yaml new file mode 100644 index 000000000..d0c1d507f --- /dev/null +++ b/config/rbac/externalartifact_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view externalartifacts. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: externalartifacts-viewer-role +rules: +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts + verbs: + - get + - list + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - externalartifacts/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 65bd29831..d2cd9e7cb 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -15,10 +15,17 @@ rules: - "" resources: - secrets + - serviceaccounts verbs: - get - list - watch +- apiGroups: + - "" + resources: + - serviceaccounts/token + verbs: + - create - apiGroups: - source.toolkit.fluxcd.io resources: diff --git a/config/samples/source_v1beta2_ocirepository.yaml b/config/samples/source_v1_ocirepository.yaml similarity index 77% rename from config/samples/source_v1beta2_ocirepository.yaml rename to config/samples/source_v1_ocirepository.yaml index e06241b97..69fb19e2a 100644 --- a/config/samples/source_v1beta2_ocirepository.yaml +++ b/config/samples/source_v1_ocirepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: OCIRepository metadata: name: ocirepository-sample diff --git a/config/testdata/helmchart-from-oci/source.yaml b/config/testdata/helmchart-from-oci/source.yaml index 354325efa..b2786531e 100644 --- a/config/testdata/helmchart-from-oci/source.yaml +++ b/config/testdata/helmchart-from-oci/source.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: podinfo @@ -8,7 +8,7 @@ spec: type: "oci" interval: 1m --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: podinfo @@ -20,7 +20,7 @@ spec: version: '6.1.*' interval: 1m --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: podinfo-keyless diff --git a/config/testdata/helmchart-valuesfile/gitrepository.yaml b/config/testdata/helmchart-valuesfile/gitrepository.yaml index b620c8560..279979e93 100644 --- a/config/testdata/helmchart-valuesfile/gitrepository.yaml +++ b/config/testdata/helmchart-valuesfile/gitrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: GitRepository metadata: name: podinfo diff --git a/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml b/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml index 4483f0ca8..3c26b3eb5 100644 --- a/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml +++ b/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: podinfo-git @@ -8,6 +8,5 @@ spec: kind: GitRepository name: podinfo chart: charts/podinfo - valuesFile: charts/podinfo/values.yaml valuesFiles: - charts/podinfo/values-prod.yaml diff --git a/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml b/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml index fdf34f6bf..0b004eb7a 100644 --- a/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml +++ b/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart 
metadata: name: podinfo @@ -8,6 +8,5 @@ spec: kind: HelmRepository name: podinfo chart: podinfo - valuesFile: values.yaml valuesFiles: - values-prod.yaml diff --git a/config/testdata/helmchart-valuesfile/helmrepository.yaml b/config/testdata/helmchart-valuesfile/helmrepository.yaml index ab568384c..f0c178695 100644 --- a/config/testdata/helmchart-valuesfile/helmrepository.yaml +++ b/config/testdata/helmchart-valuesfile/helmrepository.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmRepository metadata: name: podinfo diff --git a/config/testdata/ocirepository/signed-with-key.yaml b/config/testdata/ocirepository/signed-with-key.yaml index 7a2bd3c2c..0a3a652ee 100644 --- a/config/testdata/ocirepository/signed-with-key.yaml +++ b/config/testdata/ocirepository/signed-with-key.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: OCIRepository metadata: name: podinfo-deploy-signed-with-key diff --git a/config/testdata/ocirepository/signed-with-keyless.yaml b/config/testdata/ocirepository/signed-with-keyless.yaml index efb02fc28..ff46ed30d 100644 --- a/config/testdata/ocirepository/signed-with-keyless.yaml +++ b/config/testdata/ocirepository/signed-with-keyless.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: OCIRepository metadata: name: podinfo-deploy-signed-with-keyless diff --git a/config/testdata/ocirepository/signed-with-notation.yaml b/config/testdata/ocirepository/signed-with-notation.yaml index 39f3fe81f..55820f6d4 100644 --- a/config/testdata/ocirepository/signed-with-notation.yaml +++ b/config/testdata/ocirepository/signed-with-notation.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: OCIRepository metadata: name: podinfo-deploy-signed-with-notation diff --git a/docs/api/v1/source.md b/docs/api/v1/source.md index 121a056cd..935d74275 100644 --- a/docs/api/v1/source.md +++ b/docs/api/v1/source.md @@ -16,6 +16,8 @@ Resource Types: HelmChart
  • HelmRepository
+ • OCIRepository
  • Bucket

    @@ -180,6 +182,21 @@ for the Bucket.

    +serviceAccountName
    + +string + + + +(Optional) +

    ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the bucket. This field is only supported for the ‘gcp’ and ‘aws’ providers. +For more information about workload identity: +https://fluxcd.io/flux/components/source/buckets/#workload-identity
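As an illustrative sketch only, a Bucket using the new serviceAccountName field with the gcp provider could look like the following; the object name, bucket name and ServiceAccount are placeholders, and the ServiceAccount is assumed to be annotated for workload identity as described in the linked documentation:

```yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: Bucket
metadata:
  name: example-bucket            # placeholder name
  namespace: flux-system
spec:
  interval: 5m
  provider: gcp                   # serviceAccountName is supported for 'gcp' and 'aws'
  bucketName: example-artifacts   # placeholder bucket
  endpoint: storage.googleapis.com
  # ServiceAccount assumed to be configured for workload identity;
  # no static credentials Secret is needed in this mode.
  serviceAccountName: bucket-reader
```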

    + + + + certSecretRef
    @@ -396,6 +413,19 @@ When not specified, defaults to ‘generic’.

    +serviceAccountName
    + +string + + + +(Optional) +

    ServiceAccountName is the name of the Kubernetes ServiceAccount used to +authenticate to the GitRepository. This field is only supported for ‘azure’ provider.
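A hedged sketch of the GitRepository variant: serviceAccountName is only accepted together with provider: azure (a CEL rule on the CRD rejects other combinations); the organisation, project and ServiceAccount names below are placeholders:

```yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: example-azure-repo        # placeholder name
  namespace: flux-system
spec:
  interval: 1m
  url: https://dev.azure.com/example-org/example-project/_git/example-repo  # placeholder URL
  ref:
    branch: main
  provider: azure                 # required when serviceAccountName is set
  serviceAccountName: git-reader  # ServiceAccount assumed to be set up for workload identity
```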

    + + + + interval
    @@ -523,6 +553,20 @@ the GitRepository as cloned from the URL, using their default settings.

    should be included in the Artifact produced for this GitRepository.

    + + +sparseCheckout
    + +[]string + + + +(Optional) +

    SparseCheckout specifies a list of directories to checkout when cloning +the repository. If specified, only these directories are included in the +Artifact produced for this GitRepository.
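For example (paths and repository URL are placeholders), a sparse checkout that limits the Artifact to two directories could be declared as follows; the controller records the applied list in status.observedSparseCheckout:

```yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: example-monorepo          # placeholder name
  namespace: flux-system
spec:
  interval: 5m
  url: https://github.com/example/monorepo  # placeholder repository
  ref:
    branch: main
  # Only these directories are cloned and packaged into the Artifact.
  sparseCheckout:
    - deploy/overlays/production
    - charts/app
```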

    + + @@ -999,16 +1043,9 @@ HelmRepositoryStatus -

    Artifact +

    OCIRepository

    -

    -(Appears on: -BucketStatus, -GitRepositoryStatus, -HelmChartStatus, -HelmRepositoryStatus) -

    -

    Artifact represents the output of a Source reconciliation.

    +

    OCIRepository is the Schema for the ocirepositories API
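For orientation before the field tables below, a minimal v1 OCIRepository tracking a SemVer range might look like this sketch; the registry path and version range are placeholders:

```yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
  name: example-manifests         # placeholder name
  namespace: flux-system
spec:
  interval: 10m
  url: oci://ghcr.io/example/manifests/app   # placeholder OCI artifact repository
  ref:
    semver: ">=1.0.0"             # the newest tag in the range is selected
```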

    @@ -1021,90 +1058,269 @@ HelmRepositoryStatus + + + + + + + + + + + + + @@ -1323,6 +1539,21 @@ for the Bucket.

    + + + + @@ -1525,15 +1756,9 @@ github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
    -path
    - +apiVersion
    +string
    +source.toolkit.fluxcd.io/v1 +
    +kind
    string +
    +OCIRepository +
    +metadata
    + + +Kubernetes meta/v1.ObjectMeta +
    -

    Path is the relative file path of the Artifact. It can be used to locate -the file in the root of the Artifact storage on the local file system of -the controller managing the Source.

    +Refer to the Kubernetes API documentation for the fields of the +metadata field.
    +spec
    + + +OCIRepositorySpec + + +
    +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    url
    string
    -

    URL is the HTTP address of the Artifact as exposed by the controller -managing the Source. It can be used to retrieve the Artifact for -consumption, e.g. by another controller applying the Artifact contents.

    +

    URL is a reference to an OCI artifact repository hosted +on a remote container registry.

    +
    +ref
    + + +OCIRepositoryRef + + +
    +(Optional) +

    The OCI reference to pull and monitor for changes, +defaults to the latest tag.

    +
    +layerSelector
    + + +OCILayerSelector + + +
    +(Optional) +

    LayerSelector specifies which layer should be extracted from the OCI artifact. +When not specified, the first layer found in the artifact is selected.
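As a sketch of layer selection (the media type shown is the Helm chart content layer; registry and names are placeholders), operation: copy keeps the matched layer compressed instead of extracting it:

```yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
  name: example-chart-layer       # placeholder name
  namespace: flux-system
spec:
  interval: 10m
  url: oci://registry.example.com/org/chart   # placeholder registry
  ref:
    tag: latest
  layerSelector:
    mediaType: application/vnd.cncf.helm.chart.content.v1.tar+gzip
    operation: copy               # persist the layer as-is instead of extracting it
```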

    -revision
    +provider
    string
    -

    Revision is a human-readable identifier traceable in the origin source -system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.

    +(Optional) +

    The provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +When not specified, defaults to ‘generic’.

    -digest
    +secretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
    +(Optional) +

    SecretRef contains the secret name containing the registry login +credentials to resolve image metadata. +The secret must be of type kubernetes.io/dockerconfigjson.

    +
    +verify
    + + +OCIRepositoryVerification + + +
    +(Optional) +

    Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic.
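A hedged example of keyless Cosign verification; the issuer and subject patterns are placeholders for the CI identity expected to have signed the artifact, and no secretRef is needed in the keyless case:

```yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
  name: example-verified          # placeholder name
  namespace: flux-system
spec:
  interval: 10m
  url: oci://ghcr.io/example/manifests/app   # placeholder
  ref:
    tag: latest
  verify:
    provider: cosign
    # Accept only signatures whose Fulcio certificate matches this identity.
    matchOIDCIdentity:
      - issuer: '^https://token\.actions\.githubusercontent\.com$'
        subject: '^https://github\.com/example/app.*$'
```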

    +
    +serviceAccountName
    string
    (Optional) -

    Digest is the digest of the file in the form of ‘:’.

    +

    ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the image pull if the service account has attached pull secrets. For more information: +https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account

    +
    +certSecretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
    +(Optional) +

    CertSecretRef can be given the name of a Secret containing +either or both of

    +
      +
    • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
    • +
    • a PEM-encoded CA certificate (ca.crt)
    • +
    +

    and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.
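For a registry with a self-signed certificate, a sketch of the Secret and its reference could look like this; the CA content and registry host are placeholders:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: registry-ca               # placeholder name
  namespace: flux-system
type: Opaque
stringData:
  ca.crt: |
    -----BEGIN CERTIFICATE-----
    ...placeholder PEM data...
    -----END CERTIFICATE-----
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
  name: example-private
  namespace: flux-system
spec:
  interval: 10m
  url: oci://registry.internal.example.com/org/app   # placeholder self-hosted registry
  ref:
    tag: latest
  certSecretRef:
    name: registry-ca
```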

    -lastUpdateTime
    +proxySecretRef
    - -Kubernetes meta/v1.Time + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference
    -

    LastUpdateTime is the timestamp corresponding to the last update of the -Artifact.

    +(Optional) +

    ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the container registry.

    -size
    +interval
    -int64 + +Kubernetes meta/v1.Duration + + +
    +

    Interval at which the OCIRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

    +
    +timeout
    + + +Kubernetes meta/v1.Duration +
    (Optional) -

    Size is the number of bytes in the file.

    +

    The timeout for remote OCI Repository operations like pulling, defaults to 60s.

    -metadata
    +ignore
    + +string + +
    +(Optional) +

    Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

    +
    +insecure
    + +bool + +
    +(Optional) +

    Insecure allows connecting to a non-TLS HTTP container registry.

    +
    +suspend
    -map[string]string +bool
    (Optional) -

    Metadata holds upstream information such as OCI annotations.

    +

    This flag tells the controller to suspend the reconciliation of this source.

    +
    +
    +status
    + + +OCIRepositoryStatus + + +
    +serviceAccountName
    + +string + +
    +(Optional) +

    ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the bucket. This field is only supported for the ‘gcp’ and ‘aws’ providers. +For more information about workload identity: +https://fluxcd.io/flux/components/source/buckets/#workload-identity

    +
    certSecretRef
In BucketStatus, the artifact field type changes from the local Artifact to
github.com/fluxcd/pkg/apis/meta.Artifact.

The GitRepositoryInclude type section moves below the new ExternalArtifact sections.

ExternalArtifact

ExternalArtifact is the Schema for the external artifacts API

Field | Description

metadata
Kubernetes meta/v1.ObjectMeta

Refer to the Kubernetes API documentation for the fields of the metadata field.

spec
ExternalArtifactSpec

sourceRef
github.com/fluxcd/pkg/apis/meta.NamespacedObjectKindReference
(Optional)

SourceRef points to the Kubernetes custom resource for
which the artifact is generated.

status
ExternalArtifactStatus

ExternalArtifactSpec

(Appears on: ExternalArtifact)

ExternalArtifactSpec defines the desired state of ExternalArtifact

Field | Description
    +sourceRef
    + + +github.com/fluxcd/pkg/apis/meta.NamespacedObjectKindReference + + +
    +(Optional) +

    SourceRef points to the Kubernetes custom resource for +which the artifact is generated.

    +
    +
    +
    +
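For illustration, a minimal sketch of how a producing controller might fill in this
spec, assuming a hypothetical third-party `Source` kind served under `example.com/v1`
(all names are placeholders):

```yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: ExternalArtifact
metadata:
  name: my-artifact
  namespace: flux-system
spec:
  sourceRef:
    apiVersion: example.com/v1  # hypothetical producer API group/version
    kind: Source                # hypothetical producer kind
    name: my-source
    namespace: flux-system      # optional; defaults to the ExternalArtifact's namespace
```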

ExternalArtifactStatus

(Appears on: ExternalArtifact)

ExternalArtifactStatus defines the observed state of ExternalArtifact

Field | Description
    +artifact
    + + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
    +(Optional) +

    Artifact represents the output of an ExternalArtifact reconciliation.

    +
    +conditions
    + + +[]Kubernetes meta/v1.Condition + + +
    +(Optional) +

    Conditions holds the conditions for the ExternalArtifact.

    +
    +
    +
    +

GitRepositoryInclude

(Appears on: GitRepositorySpec, GitRepositoryStatus)

GitRepositoryInclude specifies a local reference to a GitRepository which
Artifact (sub-)contents must be included, and where they should be placed.

Field | Description
    +repository
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
    +

    GitRepositoryRef specifies the GitRepository which Artifact contents +must be included.

    +
    +fromPath
    + +string + +
    +(Optional) +

    FromPath specifies the path to copy contents from, defaults to the root +of the Artifact.

    +
    +toPath
    + +string
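These fields are used from a GitRepository's `.spec.include` list. A short sketch,
with placeholder repository names and paths:

```yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: app
  namespace: default
spec:
  interval: 5m
  url: https://github.com/example/app  # placeholder URL
  ref:
    branch: main
  include:
    - repository:
        name: common-manifests  # another GitRepository in the same namespace
      fromPath: ./deploy        # copy from this path of the included Artifact
      toPath: ./common          # place the contents here in the resulting Artifact
```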
In GitRepositorySpec, after the provider field (which, when not specified, defaults to
‘generic’) and before interval:

serviceAccountName
string
(Optional)

ServiceAccountName is the name of the Kubernetes ServiceAccount used to
authenticate to the GitRepository. This field is only supported for ‘azure’ provider.

In GitRepositorySpec, after the include field (items that should be included in the
Artifact produced for this GitRepository):

sparseCheckout
[]string
(Optional)

SparseCheckout specifies a list of directories to checkout when cloning
the repository. If specified, only these directories are included in the
Artifact produced for this GitRepository.

In GitRepositoryStatus, the artifact and includedArtifacts field types change from the
local Artifact to github.com/fluxcd/pkg/apis/meta.Artifact, and before the
sourceVerificationMode field:

observedSparseCheckout
[]string
(Optional)

ObservedSparseCheckout is the observed list of directories used to
produce the current Artifact.

The same Artifact to github.com/fluxcd/pkg/apis/meta.Artifact type change applies to
the artifact field of the remaining status sections, including HelmRepositoryStatus.
The OCIRepositoryVerification type section moves to the end of the document, and the
following OCILayerSelector section is added in its place.

OCILayerSelector

(Appears on: OCIRepositorySpec, OCIRepositoryStatus)

    OCILayerSelector specifies which layer should be extracted from an OCI Artifact

Field | Description
    +mediaType
    + +string + +
    +(Optional) +

    MediaType specifies the OCI media type of the layer +which should be extracted from the OCI Artifact. The +first layer matching this type is selected.

    +
    +operation
    + +string + +
    +(Optional) +

    Operation specifies how the selected layer should be processed. +By default, the layer compressed content is extracted to storage. +When the operation is set to ‘copy’, the layer compressed content +is persisted to storage as it is.

    +
    +
    +
    +
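As a sketch of how these two fields combine (the URL and media type are illustrative),
the following selects a Helm chart layer and keeps it compressed by using the `copy`
operation:

```yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
  name: podinfo-chart
  namespace: default
spec:
  interval: 10m
  url: oci://ghcr.io/stefanprodan/charts/podinfo  # illustrative repository
  ref:
    semver: ">=6.0.0"
  layerSelector:
    mediaType: application/vnd.cncf.helm.chart.content.v1.tar+gzip
    operation: copy  # persist the compressed layer as-is instead of extracting it
```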

OCIRepositoryRef

(Appears on: OCIRepositorySpec)

OCIRepositoryRef defines the image reference for the OCIRepository’s URL

Field | Description
    +digest
    + +string + +
    +(Optional) +

Digest is the image digest to pull, takes precedence over SemVer.
The value should be in the format ‘sha256:<digest>’.

    +
    +semver
    + +string + +
    +(Optional) +

    SemVer is the range of tags to pull selecting the latest within +the range, takes precedence over Tag.

    +
    +semverFilter
    + +string + +
    +(Optional) +

    SemverFilter is a regex pattern to filter the tags within the SemVer range.

    +
    +tag
    + +string + +
    +(Optional) +

    Tag is the image tag to pull, defaults to latest.

    +
    +
    +
    +
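A sketch combining these fields (mirroring the example in the OCIRepository spec
document): `digest` takes precedence over `semver`, which takes precedence over `tag`,
and `semverFilter` narrows the candidate tags before the range is evaluated:

```yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
  name: podinfo
  namespace: default
spec:
  interval: 5m0s
  url: oci://ghcr.io/stefanprodan/manifests/podinfo
  ref:
    # only tags matching the filter are considered for the SemVer range;
    # the -0 suffix includes prerelease versions
    semver: ">= 6.1.x-0"
    semverFilter: ".*-rc.*"
```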

OCIRepositorySpec

(Appears on: OCIRepository)

OCIRepositorySpec defines the desired state of OCIRepository

Field | Description
    +url
    + +string + +
    +

    URL is a reference to an OCI artifact repository hosted +on a remote container registry.

    +
    +ref
    + + +OCIRepositoryRef + + +
    +(Optional) +

    The OCI reference to pull and monitor for changes, +defaults to the latest tag.

    +
    +layerSelector
    + + +OCILayerSelector + + +
    +(Optional) +

    LayerSelector specifies which layer should be extracted from the OCI artifact. +When not specified, the first layer found in the artifact is selected.

    +
    +provider
    + +string + +
    +(Optional) +

    The provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’. +When not specified, defaults to ‘generic’.

    +
    +secretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
    +(Optional) +

    SecretRef contains the secret name containing the registry login +credentials to resolve image metadata. +The secret must be of type kubernetes.io/dockerconfigjson.

    +
    +verify
    + + +OCIRepositoryVerification + + +
    +(Optional) +

    Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic.

    +
    +serviceAccountName
    + +string + +
    +(Optional) +

    ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate +the image pull if the service account has attached pull secrets. For more information: +https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account

    +
    +certSecretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
    +(Optional) +

    CertSecretRef can be given the name of a Secret containing +either or both of

    +
      +
    • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
    • +
    • a PEM-encoded CA certificate (ca.crt)
    • +
    +

    and whichever are supplied, will be used for connecting to the +registry. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

    +
    +proxySecretRef
    + + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
    +(Optional) +

    ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the container registry.

    +
    +interval
    + + +Kubernetes meta/v1.Duration + + +
    +

    Interval at which the OCIRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

    +
    +timeout
    + + +Kubernetes meta/v1.Duration + + +
    +(Optional) +

    The timeout for remote OCI Repository operations like pulling, defaults to 60s.

    +
    +ignore
    + +string + +
    +(Optional) +

    Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

    +
    +insecure
    + +bool + +
    +(Optional) +

    Insecure allows connecting to a non-TLS HTTP container registry.

    +
    +suspend
    + +bool + +
    +(Optional) +

    This flag tells the controller to suspend the reconciliation of this source.

    +
    +
    +
    +
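Pulling these fields together, an illustrative sketch (registry URL and Secret name are
placeholders) of an object that pulls a private repository with static credentials, a
longer timeout and custom ignore rules:

```yaml
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
  name: app-manifests
  namespace: default
spec:
  interval: 10m
  timeout: 90s
  url: oci://registry.example.com/org/app-manifests  # placeholder registry
  ref:
    tag: latest
  secretRef:
    name: regcred  # kubernetes.io/dockerconfigjson Secret with registry credentials
  ignore: |
    # exclude documentation from the produced Artifact
    *.md
```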

OCIRepositoryStatus

(Appears on: OCIRepository)

OCIRepositoryStatus defines the observed state of OCIRepository

Field | Description
    +observedGeneration
    + +int64 + +
    +(Optional) +

    ObservedGeneration is the last observed generation.

    +
    +conditions
    + + +[]Kubernetes meta/v1.Condition + + +
    +(Optional) +

    Conditions holds the conditions for the OCIRepository.

    +
    +url
    + +string + +
    +(Optional) +

    URL is the download link for the artifact output of the last OCI Repository sync.

    +
    +artifact
    + + +github.com/fluxcd/pkg/apis/meta.Artifact + + +
    +(Optional) +

    Artifact represents the output of the last successful OCI Repository sync.

    +
    +observedIgnore
    + +string + +
    +(Optional) +

    ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

    +
    +observedLayerSelector
    + + +OCILayerSelector + + +
    +(Optional) +

    ObservedLayerSelector is the observed layer selector used for constructing +the source artifact.

    +
    +ReconcileRequestStatus
    + + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
    +

    +(Members of ReconcileRequestStatus are embedded into this type.) +

    +
    +
    +
    +
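For orientation, an illustrative (not exhaustive) status snippet for a successfully
reconciled object; the values are placeholders borrowed from the podinfo example used
elsewhere in these docs:

```yaml
status:
  observedGeneration: 1
  conditions:
    - type: Ready
      status: "True"
      reason: Succeeded
      message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de'
  url: http://source-controller.source-system.svc.cluster.local./ocirepository/default/podinfo/latest.tar.gz
  observedLayerSelector:
    mediaType: application/vnd.cncf.helm.chart.content.v1.tar+gzip
    operation: extract
  lastHandledReconcileAt: "1660837200"  # from the embedded ReconcileRequestStatus; illustrative value
```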

OCIRepositoryVerification

(Appears on: HelmChartSpec, OCIRepositorySpec)

    OCIRepositoryVerification verifies the authenticity of an OCI Artifact

    diff --git a/docs/spec/v1/README.md b/docs/spec/v1/README.md index 3a382959f..f08ea805f 100644 --- a/docs/spec/v1/README.md +++ b/docs/spec/v1/README.md @@ -6,6 +6,7 @@ This is the v1 API specification for defining the desired state sources of Kuber * Source kinds: + [GitRepository](gitrepositories.md) + + [OCIRepository](ocirepositories.md) + [HelmRepository](helmrepositories.md) + [HelmChart](helmcharts.md) + [Bucket](buckets.md) @@ -18,3 +19,4 @@ This is the v1 API specification for defining the desired state sources of Kuber * [kustomize-controller](https://github.com/fluxcd/kustomize-controller/) * [helm-controller](https://github.com/fluxcd/helm-controller/) +* [source-watcher](https://github.com/fluxcd/source-watcher/) diff --git a/docs/spec/v1/buckets.md b/docs/spec/v1/buckets.md index 9e72f112b..077ac952b 100644 --- a/docs/spec/v1/buckets.md +++ b/docs/spec/v1/buckets.md @@ -134,6 +134,9 @@ Supported options are: If you do not specify `.spec.provider`, it defaults to `generic`. +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + #### Generic When a Bucket's `spec.provider` is set to `generic`, the controller will @@ -196,6 +199,8 @@ The Provider allows for specifying the [Amazon AWS Region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions) using the [`.spec.region` field](#region). +For detailed setup instructions, see: https://fluxcd.io/flux/integrations/aws/#for-amazon-simple-storage-service + ##### AWS EC2 example **Note:** On EKS you have to create an [IAM role](#aws-iam-role-example) for @@ -270,6 +275,55 @@ data: secretkey: ``` +##### AWS Controller-Level Workload Identity example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws-controller-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + timeout: 30s +``` + +##### AWS Object-Level Workload Identity example + +**Note:** To use Object-Level Workload Identity (`.spec.serviceAccountName` with +cloud providers), the controller feature gate `ObjectLevelWorkloadIdentity` must +be enabled. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws-object-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + serviceAccountName: aws-workload-identity-sa + timeout: 30s +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: aws-workload-identity-sa + namespace: default + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/flux-bucket-role +``` + #### Azure When a Bucket's `.spec.provider` is set to `azure`, the source-controller will @@ -513,83 +567,39 @@ metadata: spec: interval: 5m0s provider: azure - bucketName: testsas - endpoint: https://testfluxsas.blob.core.windows.net + bucketName: testwi + endpoint: https://testfluxwi.blob.core.windows.net ``` -##### Deprecated: Managed Identity with AAD Pod Identity +##### Azure Object-Level Workload Identity example -If you are using [aad pod identity](https://azure.github.io/aad-pod-identity/docs), -You need to create an Azure Identity and give it access to Azure Blob Storage. 
- -```sh -export IDENTITY_NAME="blob-access" - -az role assignment create --role "Storage Blob Data Reader" \ ---assignee-object-id "$(az identity show -n $IDENTITY_NAME -o tsv --query principalId -g $RESOURCE_GROUP)" \ ---scope "/subscriptions//resourceGroups/$RESOURCE_GROUP/providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/" - -export IDENTITY_CLIENT_ID="$(az identity show -n ${IDENTITY_NAME} -g ${RESOURCE_GROUP} -otsv --query clientId)" -export IDENTITY_RESOURCE_ID="$(az identity show -n ${IDENTITY_NAME} -otsv --query id)" -``` - -Create an AzureIdentity object that references the identity created above: +**Note:** To use Object-Level Workload Identity (`.spec.serviceAccountName` with +cloud providers), the controller feature gate `ObjectLevelWorkloadIdentity` must +be enabled. ```yaml --- -apiVersion: aadpodidentity.k8s.io/v1 -kind: AzureIdentity -metadata: - name: # source-controller label will match this name - namespace: flux-system -spec: - clientID: - resourceID: - type: 0 # user-managed identity -``` - -Create an AzureIdentityBinding object that binds Pods with a specific selector -with the AzureIdentity created: - -```yaml -apiVersion: "aadpodidentity.k8s.io/v1" -kind: AzureIdentityBinding -metadata: - name: ${IDENTITY_NAME}-binding -spec: - azureIdentity: ${IDENTITY_NAME} - selector: ${IDENTITY_NAME} -``` - -Label the source-controller Deployment correctly so that it can match an identity binding: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: kustomize-controller - namespace: flux-system -spec: - template: - metadata: - labels: - aadpodidbinding: ${IDENTITY_NAME} # match the AzureIdentity name -``` - -If you have set up aad-pod-identity correctly and labeled the source-controller -Deployment, then you don't need to reference a Secret. - -```yaml apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: - name: azure-bucket - namespace: flux-system + name: azure-object-level-workload-identity + namespace: default spec: interval: 5m0s provider: azure - bucketName: testsas - endpoint: https://testfluxsas.blob.core.windows.net + bucketName: testwi + endpoint: https://testfluxwi.blob.core.windows.net + serviceAccountName: azure-workload-identity-sa + timeout: 30s +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: azure-workload-identity-sa + namespace: default + annotations: + azure.workload.identity/client-id: + azure.workload.identity/tenant-id: ``` ##### Azure Blob SAS Token example @@ -644,29 +654,38 @@ Refer to the [Azure documentation](https://learn.microsoft.com/en-us/rest/api/st #### GCP -When a Bucket's `.spec.provider` is set to `gcp`, the source-controller will -attempt to communicate with the specified [Endpoint](#endpoint) using the -[Google Client SDK](https://github.com/googleapis/google-api-go-client). +For detailed setup instructions, see: https://fluxcd.io/flux/integrations/gcp/#for-google-cloud-storage -Without a [Secret reference](#secret-reference), authorization using a -workload identity is attempted by default. The workload identity is obtained -using the `GOOGLE_APPLICATION_CREDENTIALS` environment variable, falling back -to the Google Application Credential file in the config directory. -When a reference is specified, it expects a Secret with a `.data.serviceaccount` -value with a GCP service account JSON file. 
+##### GCP Controller-Level Workload Identity example -The Provider allows for specifying the -[Bucket location](https://cloud.google.com/storage/docs/locations) using the -[`.spec.region` field](#region). +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: gcp-controller-level-workload-identity + namespace: default +spec: + interval: 5m0s + provider: gcp + bucketName: podinfo + endpoint: storage.googleapis.com + region: us-east-1 + timeout: 30s +``` + +##### GCP Object-Level Workload Identity example -##### GCP example +**Note:** To use Object-Level Workload Identity (`.spec.serviceAccountName` with +cloud providers), the controller feature gate `ObjectLevelWorkloadIdentity` must +be enabled. ```yaml --- apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: - name: gcp-workload-identity + name: gcp-object-level-workload-identity namespace: default spec: interval: 5m0s @@ -674,7 +693,16 @@ spec: bucketName: podinfo endpoint: storage.googleapis.com region: us-east-1 + serviceAccountName: gcp-workload-identity-sa timeout: 30s +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: gcp-workload-identity-sa + namespace: default + annotations: + iam.gke.io/gcp-service-account: ``` ##### GCP static auth example @@ -840,10 +868,13 @@ See [Provider](#provider) for more (provider specific) examples. See [Provider](#provider) for more (provider specific) examples. -### Cert secret reference +### Mutual TLS Authentication `.spec.certSecretRef.name` is an optional field to specify a secret containing -TLS certificate data. The secret can contain the following keys: +TLS certificate data for mutual TLS authentication. + +To authenticate towards a bucket using mutual TLS, +the referenced Secret's `.data` should contain the following keys: * `tls.crt` and `tls.key`, to specify the client certificate and private key used for TLS client authentication. These must be used in conjunction, i.e. @@ -851,9 +882,6 @@ specifying one without the other will lead to an error. * `ca.crt`, to specify the CA certificate used to verify the server, which is required if the server is using a self-signed certificate. -If the server is using a self-signed certificate and has TLS client -authentication enabled, all three values are required. - The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in the Secret are expected to be [PEM-encoded][pem-encoding]. Assuming you have three files; `client.key`, `client.crt` and `ca.crt` for the client private key, @@ -956,6 +984,29 @@ credentials for the object storage. For some `.spec.provider` implementations the presence of the field is required, see [Provider](#provider) for more details and examples. +### Service Account reference + +`.spec.serviceAccountName` is an optional field to specify a Service Account +in the same namespace as Bucket with purpose depending on the value of +the `.spec.provider` field: + +- When `.spec.provider` is set to `generic`, the controller will fetch the image + pull secrets attached to the Service Account and use them for authentication. +- When `.spec.provider` is set to `aws`, `azure`, or `gcp`, the Service Account + will be used for Workload Identity authentication. In this case, the controller + feature gate `ObjectLevelWorkloadIdentity` must be enabled, otherwise the + controller will error out. + +**Note:** that for a publicly accessible object storage, you don't need to +provide a `secretRef` nor `serviceAccountName`. 
+ +**Important:** `.spec.secretRef` and `.spec.serviceAccountName` are mutually +exclusive and cannot be set at the same time. This constraint is enforced +at the CRD level. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + ### Prefix `.spec.prefix` is an optional field to enable server-side filtering diff --git a/docs/spec/v1/externalartifacts.md b/docs/spec/v1/externalartifacts.md new file mode 100644 index 000000000..1eccbe0e0 --- /dev/null +++ b/docs/spec/v1/externalartifacts.md @@ -0,0 +1,114 @@ +# External Artifacts + + + +The `ExternalArtifact` is a generic API designed for interoperability with Flux. +It allows 3rd party controllers to produce and store [Artifact](#artifact) objects +in the same way as Flux's own source-controller. +For more details on the design and motivation behind this API, +see [RFC-0012](https://github.com/fluxcd/flux2/tree/main/rfcs/0012-external-artifact). + +## Example + +The following is an example of a ExternalArtifact produced by a 3rd party +source controller: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: ExternalArtifact +metadata: + name: my-artifact + namespace: flux-system +spec: + sourceRef: + apiVersion: example.com/v1 + kind: Source + name: my-source +status: + artifact: + digest: sha256:35d47c9db0eee6ffe08a404dfb416bee31b2b79eabc3f2eb26749163ce487f52 + lastUpdateTime: "2025-08-21T13:37:31Z" + path: source/flux-system/my-source/35d47c9d.tar.gz + revision: v1.0.0@sha256:35d47c9db0eee6ffe08a404dfb416bee31b2b79eabc3f2eb26749163ce487f52 + size: 20914 + url: http://example-controller.flux-system.svc.cluster.local./source/flux-system/my-source/35d47c9d.tar.gz + conditions: + - lastTransitionTime: "2025-08-21T13:37:31Z" + message: stored artifact for revision v1.0.0 + observedGeneration: 1 + reason: Succeeded + status: "True" + type: Ready +``` + +## ExternalArtifact spec + +### Source reference + +The `spec.sourceRef` field is optional and contains a reference +to the custom resource that the ExternalArtifact is based on. + +The `spec.sourceRef` contains the following fields: + +- `apiVersion`: the API version of the custom resource. +- `kind`: the kind of the custom resource. +- `name`: the name of the custom resource. +- `namespace`: the namespace of the custom resource. If omitted, it defaults to the + namespace of the ExternalArtifact. + +## ExternalArtifact status + +### Artifact + +The ExternalArtifact reports the latest synchronized state +as an Artifact object in the `.status.artifact`. + +The `.status.artifact` contains the following fields: + +- `digest`: The checksum of the tar.gz file in the format `:`. +- `lastUpdateTime`: Timestamp of the last artifact update. +- `path`: Relative file path of the artifact in storage. +- `revision`: Human-readable identifier with version and checksum in the format `@:`. +- `size`: Number of bytes in the tar.gz file. +- `url`: In-cluster HTTP address for artifact retrieval. + +### Conditions + +The ExternalArtifact reports its status using Kubernetes standard conditions. + +#### Ready ExternalArtifact + +When the 3rd party controller has successfully produced and stored an +Artifact in storage, it sets a Condition with the following +attributes in the ExternalArtifact's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +The `message` field should contain a human-readable message indicating +the successful storage of the artifact and the associated revision. 
+ +If the 3rd party controller performs a signature verification +of the artifact, and the verification is successful, a Condition with the +following attributes is added to the ExternalArtifact's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +The `message` field should contain a human-readable message indicating +the successful verification of the artifact and the associated verification method. + +#### Failed ExternalArtifact + +If the 3rd party controller fails to produce and store an Artifact, +it sets the `Ready` Condition status to `False`, and adds a Condition with +the following attributes to the ExternalArtifact's `.status.conditions`: + +- `type: Ready` +- `status: "False"` +- `reason: FetchFailed` | `reason: StorageOperationFailed` | `reason: VerificationFailed` + +The `message` field should contain a human-readable message indicating +the reason for the failure. diff --git a/docs/spec/v1/gitrepositories.md b/docs/spec/v1/gitrepositories.md index bf1602c3a..d39ee73d3 100644 --- a/docs/spec/v1/gitrepositories.md +++ b/docs/spec/v1/gitrepositories.md @@ -177,6 +177,31 @@ data: ca.crt: ``` +#### HTTPS Mutual TLS authentication + +To authenticate towards a Git repository over HTTPS using mutual TLS, +the referenced Secret's `.data` should contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used + for TLS client authentication. These must be used in conjunction, i.e. + specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is + required if the server is using a self-signed certificate. + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: https-tls-certs + namespace: default +type: Opaque +data: + tls.crt: + tls.key: + ca.crt: +``` + #### SSH authentication To authenticate towards a Git repository over SSH, the referenced Secret is @@ -226,6 +251,9 @@ Supported options are: When provider is not specified, it defaults to `generic` indicating that mechanisms using `spec.secretRef` are used for authentication. +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + #### Azure The `azure` provider can be used to authenticate to Azure DevOps repositories @@ -329,6 +357,11 @@ same pattern. - The private key that was generated in the pre-requisites. - (Optional) GitHub Enterprise Server users can set the base URL to `http(s)://HOSTNAME/api/v3`. +- (Optional) If GitHub Enterprise Server uses a private CA, include its bundle (root and any intermediates) in `ca.crt`. + If the `ca.crt` is specified, then it will be used for TLS verification for all API / Git over `HTTPS` requests to the GitHub Enterprise Server. + +**NOTE:** If the secret contains `tls.crt`, `tls.key` then [mutual TLS configuration](#https-mutual-tls-authentication) will be automatically enabled. +Omit these keys if the GitHub server does not support mutual TLS. ```yaml apiVersion: v1 @@ -344,6 +377,10 @@ stringData: ... -----END RSA PRIVATE KEY----- githubAppBaseURL: "" #optional, required only for GitHub Enterprise Server users + ca.crt: | #optional, for GitHub Enterprise Server users + -----BEGIN CERTIFICATE----- + ... 
+ -----END CERTIFICATE----- ``` Alternatively, the Flux CLI can be used to automatically create the secret with @@ -356,6 +393,24 @@ flux create secret githubapp ghapp-secret \ --app-private-key=~/private-key.pem ``` +### Service Account reference + +`.spec.serviceAccountName` is an optional field to specify a Service Account +in the same namespace as GitRepository with purpose depending on the value of +the `.spec.provider` field: + +- When `.spec.provider` is set to `azure`, the Service Account + will be used for Workload Identity authentication. In this case, the controller + feature gate `ObjectLevelWorkloadIdentity` must be enabled, otherwise the + controller will error out. For Azure DevOps specific setup, see the + [Azure DevOps integration guide](https://fluxcd.io/flux/integrations/azure/#for-azure-devops). + +**Note:** that for a publicly accessible git repository, you don't need to +provide a `secretRef` nor `serviceAccountName`. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + ### Interval `.spec.interval` is a required field that specifies the interval at which the @@ -590,6 +645,28 @@ list](#default-exclusions), and may overrule the [`.sourceignore` file exclusions](#sourceignore-file). See [excluding files](#excluding-files) for more information. +### Sparse checkout + +`.spec.sparseCheckout` is an optional field to specify list of directories to +checkout when cloning the repository. If specified, only the specified directory +contents will be present in the artifact produced for this repository. + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: GitRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m + url: https://github.com/stefanprodan/podinfo + ref: + branch: master + sparseCheckout: + - charts + - kustomize +``` + ### Suspend `.spec.suspend` is an optional field to suspend the reconciliation of a @@ -1132,6 +1209,27 @@ status: ... ``` +### Observed Sparse Checkout + +The source-controller reports observed sparse checkout in the GitRepository's +`.status.observedSparseCheckout`. The observed sparse checkout is the latest +`.spec.sparseCheckout` value which resulted in a [ready +state](#ready-gitrepository), or stalled due to error it can not recover from +without human intervention. The value is the same as the [sparseCheckout in +spec](#sparse-checkout). It indicates the sparse checkout configuration used in +building the current artifact in storage. It is also used by the controller to +determine if an artifact needs to be rebuilt. + +Example: +```yaml +status: + ... + observedSparseCheckout: + - charts + - kustomize + ... +``` + ### Source Verification Mode The source-controller reports the Git object(s) it verified in the Git diff --git a/docs/spec/v1/helmrepositories.md b/docs/spec/v1/helmrepositories.md index 94d6c5af5..97fdff2ec 100644 --- a/docs/spec/v1/helmrepositories.md +++ b/docs/spec/v1/helmrepositories.md @@ -153,6 +153,9 @@ A HelmRepository also needs a Possible values are `default` for a Helm HTTP/S repository, or `oci` for an OCI Helm repository. +**Note:**: For improved support for OCI Helm charts, please use the +[`OCIRepository`](ocirepositories.md) API. + ### Provider `.spec.provider` is an optional field that allows specifying an OIDC provider used @@ -439,10 +442,13 @@ deprecated. Please use [`.spec.certSecretRef`](#cert-secret-reference) instead. 
If the controller uses the secret specified by this field to configure TLS, then a deprecation warning will be logged. -### Cert secret reference +### Mutual TLS Authentication `.spec.certSecretRef.name` is an optional field to specify a secret containing -TLS certificate data. The secret can contain the following keys: +TLS certificate data for mutual TLS authentication. + +To authenticate towards a Helm repository using mutual TLS, +the referenced Secret's `.data` should contain the following keys: * `tls.crt` and `tls.key`, to specify the client certificate and private key used for TLS client authentication. These must be used in conjunction, i.e. @@ -450,9 +456,6 @@ specifying one without the other will lead to an error. * `ca.crt`, to specify the CA certificate used to verify the server, which is required if the server is using a self-signed certificate. -If the server is using a self-signed certificate and has TLS client -authentication enabled, all three values are required. - The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in the Secret are expected to be [PEM-encoded][pem-encoding]. Assuming you have three files; `client.key`, `client.crt` and `ca.crt` for the client private key, diff --git a/docs/spec/v1/ocirepositories.md b/docs/spec/v1/ocirepositories.md new file mode 100644 index 000000000..d2bfa399e --- /dev/null +++ b/docs/spec/v1/ocirepositories.md @@ -0,0 +1,1147 @@ +# OCI Repositories + + + +The `OCIRepository` API defines a Source to produce an Artifact for an OCI +repository. + +## Example + +The following is an example of an OCIRepository. It creates a tarball +(`.tar.gz`) Artifact with the fetched data from an OCI repository for the +resolved digest. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + tag: latest +``` + +In the above example: + +- An OCIRepository named `podinfo` is created, indicated by the + `.metadata.name` field. +- The source-controller checks the OCI repository every five minutes, indicated + by the `.spec.interval` field. +- It pulls the `latest` tag of the `ghcr.io/stefanprodan/manifests/podinfo` + repository, indicated by the `.spec.ref.tag` and `.spec.url` fields. +- The resolved tag and SHA256 digest is used as the Artifact + revision, reported in-cluster in the `.status.artifact.revision` field. +- When the current OCIRepository digest differs from the latest fetched + digest, a new Artifact is archived. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `ocirepository.yaml`. + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f ocirepository.yaml + ``` + +2. Run `kubectl get ocirepository` to see the OCIRepository: + + ```console + NAME URL AGE READY STATUS + podinfo oci://ghcr.io/stefanprodan/manifests/podinfo 5s True stored artifact with revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + ``` + +3. Run `kubectl describe ocirepository podinfo` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the OCIRepository's Status: + + ```console + ... 
+ Status: + Artifact: + Digest: sha256:d7e924b4882e55b97627355c7b3d2e711e9b54303afa2f50c25377f4df66a83b + Last Update Time: 2025-06-14T11:23:36Z + Path: ocirepository/default/podinfo/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de.tar.gz + Revision: latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de + Size: 1105 + URL: http://source-controller.flux-system.svc.cluster.local./ocirepository/oci/podinfo/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de.tar.gz + Conditions: + Last Transition Time: 2025-06-14T11:23:36Z + Message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2025-06-14T11:23:36Z + Message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./gitrepository/default/podinfo/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 62s source-controller stored artifact with revision 'latest/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' from 'oci://ghcr.io/stefanprodan/manifests/podinfo' + ``` + +## Writing an OCIRepository spec + +As with all other Kubernetes config, an OCIRepository needs `apiVersion`, +`kind`, and `metadata` fields. The name of an OCIRepository object must be a +valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +An OCIRepository also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### URL + +`.spec.url` is a required field that specifies the address of the +container image repository in the format `oci://://`. + +**Note:** that specifying a tag or digest is not acceptable for this field. + +### Provider + +`.spec.provider` is an optional field that allows specifying an OIDC provider used for +authentication purposes. + +Supported options are: + +- `generic` +- `aws` +- `azure` +- `gcp` + +The `generic` provider can be used for public repositories or when +static credentials are used for authentication, either with +`spec.secretRef` or `spec.serviceAccountName`. +If you do not specify `.spec.provider`, it defaults to `generic`. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +#### AWS + +The `aws` provider can be used to authenticate automatically using the EKS +worker node IAM role or IAM Role for Service Accounts (IRSA), and by extension +gain access to ECR. + +When the worker node IAM role has access to ECR, source-controller running on it +will also have access to ECR. 
+ +When using IRSA to enable access to ECR, add the following patch to your +bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + eks.amazonaws.com/role-arn: + target: + kind: ServiceAccount + name: source-controller +``` + +Note that you can attach the AWS managed policy `arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly` +to the IAM role when using IRSA. + +#### Azure + +The `azure` provider can be used to authenticate automatically using Workload Identity and Kubelet Managed +Identity to gain access to ACR. + +##### Kubelet Managed Identity + +When the kubelet managed identity has access to ACR, source-controller running +on it will also have access to ACR. + +**Note:** If you have more than one identity configured on the cluster, you have to specify which one to use +by setting the `AZURE_CLIENT_ID` environment variable in the source-controller deployment. + +If you are running into further issues, please look at the +[troubleshooting guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/TROUBLESHOOTING.md#azure-virtual-machine-managed-identity). + +##### Workload Identity + +When using Workload Identity to enable access to ACR, add the following patch to +your bootstrap repository, in the `flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +Ensure Workload Identity is properly set up on your cluster and the mutating webhook is installed. +Create an identity that has access to ACR. Next, establish +a federated identity between the source-controller ServiceAccount and the +identity. Patch the source-controller Deployment and ServiceAccount as shown in the patch +above. Please take a look at this [guide](https://azure.github.io/azure-workload-identity/docs/quick-start.html#6-establish-federated-identity-credential-between-the-identity-and-the-service-account-issuer--subject). + +#### GCP + +The `gcp` provider can be used to authenticate automatically using OAuth scopes +or Workload Identity, and by extension gain access to GCR or Artifact Registry. + +When the GKE nodes have the appropriate OAuth scope for accessing GCR and +Artifact Registry, source-controller running on it will also have access to them. 
+ +When using Workload Identity to enable access to GCR or Artifact Registry, add +the following patch to your bootstrap repository, in the +`flux-system/kustomization.yaml` file: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + annotations: + iam.gke.io/gcp-service-account: + target: + kind: ServiceAccount + name: source-controller +``` + +The Artifact Registry service uses the permission `artifactregistry.repositories.downloadArtifacts` +that is located under the Artifact Registry Reader role. If you are using +Google Container Registry service, the needed permission is instead `storage.objects.list` +which can be bound as part of the Container Registry Service Agent role. +Take a look at [this guide](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) +for more information about setting up GKE Workload Identity. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the OCIRepository, containing authentication +credentials for the OCI repository. + +This secret is expected to be in the same format as [`imagePullSecrets`][image-pull-secrets]. +The usual way to create such a secret is with: + +```sh +kubectl create secret docker-registry ... +``` + +### Service Account reference + +`.spec.serviceAccountName` is an optional field to specify a Service Account +in the same namespace as OCIRepository with purpose depending on the value of +the `.spec.provider` field: + +- When `.spec.provider` is set to `generic`, the controller will fetch the image + pull secrets attached to the Service Account and use them for authentication. +- When `.spec.provider` is set to `aws`, `azure`, or `gcp`, the Service Account + will be used for Workload Identity authentication. In this case, the controller + feature gate `ObjectLevelWorkloadIdentity` must be enabled, otherwise the + controller will error out. + +**Note:** that for a publicly accessible image repository, you don't need to +provide a `secretRef` nor `serviceAccountName`. + +For a complete guide on how to set up authentication for cloud providers, +see the integration [docs](/flux/integrations/). + +### Mutual TLS Authentication + +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data for mutual TLS authentication. + +To authenticate towards an OCI repository using mutual TLS, +the referenced Secret's `.data` should contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. +specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. + +The Secret should be of type `Opaque` or `kubernetes.io/tls`. All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. 
Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: example + namespace: default +spec: + interval: 5m0s + url: oci://example.com + certSecretRef: + name: example-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: example-tls + namespace: default +type: kubernetes.io/tls # or Opaque +data: + tls.crt: + tls.key: + # NOTE: Can be supplied without the above values + ca.crt: +``` + +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all the remote operations related to the OCIRepository. +The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. + +Example: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. + +**Warning:** [Cosign](https://github.com/sigstore/cosign) *keyless* +[verification](#verification) is not supported for this API. If you +require cosign keyless verification to use a proxy you must use the +standard environment variables mentioned above. If you specify a +`proxySecretRef` the controller will simply send out the requests +needed for keyless verification without the associated object-level +proxy settings. + +### Insecure + +`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) +container registry server, if set to `true`. The default value is `false`, +denying insecure (HTTP) connections. + +### Interval + +`.spec.interval` is a required field that specifies the interval at which the +OCI repository must be fetched. + +After successfully reconciling the object, the source-controller requeues it +for inspection after the specified interval. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to reconcile the object every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. a change to +the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple OCIRepository objects are +set up with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). + +### Timeout + +`.spec.timeout` is an optional field to specify a timeout for OCI operations +like pulling. 
The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. The default value +is `60s`. + +### Reference + +`.spec.ref` is an optional field to specify the OCI reference to resolve and +watch for changes. References are specified in one or more subfields +(`.tag`, `.semver`, `.digest`), with latter listed fields taking +precedence over earlier ones. If not specified, it defaults to the `latest` +tag. + +#### Tag example + +To pull a specific tag, use `.spec.ref.tag`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + ref: + tag: "" +``` + +#### SemVer example + +To pull a tag based on a +[SemVer range](https://github.com/Masterminds/semver#checking-version-constraints), +use `.spec.ref.semver`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + ref: + # SemVer range reference: https://github.com/Masterminds/semver#checking-version-constraints + semver: "" +``` + +This field takes precedence over [`.tag`](#tag-example). + +#### SemverFilter example + +`.spec.ref.semverFilter` is an optional field to specify a SemVer filter to apply +when fetching tags from the OCI repository. The filter is a regular expression +that is applied to the tags fetched from the repository. Only tags that match +the filter are considered for the semver range resolution. + +**Note:** The filter is only taken into account when the `.spec.ref.semver` field +is set. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo + namespace: default +spec: + interval: 5m0s + url: oci://ghcr.io/stefanprodan/manifests/podinfo + ref: + # SemVer comparisons using constraints without a prerelease comparator will skip prerelease versions. + # Adding a `-0` suffix to the semver range will include prerelease versions. + semver: ">= 6.1.x-0" + semverFilter: ".*-rc.*" +``` + +In the above example, the controller fetches tags from the `ghcr.io/stefanprodan/manifests/podinfo` +repository and filters them using the regular expression `.*-rc.*`. Only tags that +contain the `-rc` suffix are considered for the semver range resolution. + +#### Digest example + +To pull a specific digest, use `.spec.ref.digest`: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + ref: + digest: "sha256:" +``` + +This field takes precedence over all other fields. + +### Layer selector + +`spec.layerSelector` is an optional field to specify which layer should be extracted from the OCI Artifact. +If not specified, the controller will extract the first layer found in the artifact. + +To extract a layer matching a specific +[OCI media type](https://github.com/opencontainers/image-spec/blob/v1.0.2/media-types.md): + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + layerSelector: + mediaType: "application/vnd.cncf.helm.chart.content.v1.tar+gzip" + operation: extract # can be 'extract' or 'copy', defaults to 'extract' +``` + +If the layer selector matches more than one layer, the first layer matching the specified media type will be used. +Note that the selected OCI layer must be +[compressed](https://github.com/opencontainers/image-spec/blob/v1.0.2/layer.md#gzip-media-types) +in the `tar+gzip` format. 
+ +When `.spec.layerSelector.operation` is set to `copy`, instead of extracting the +compressed layer, the controller copies the tarball as-is to storage, thus +keeping the original content unaltered. + +### Ignore + +`.spec.ignore` is an optional field to specify rules in [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Paths +matching the defined rules are excluded while archiving. + +When specified, `.spec.ignore` overrides the [default exclusion +list](#default-exclusions), and may overrule the [`.sourceignore` file +exclusions](#sourceignore-file). See [excluding files](#excluding-files) +for more information. + +### Verification + +`.spec.verify` is an optional field to enable the verification of [Cosign](https://github.com/sigstore/cosign) +or [Notation](https://github.com/notaryproject/notation) +signatures. The field offers three subfields: + +- `.provider`, to specify the verification provider. The supported options are `cosign` and `notation` at present. +- `.secretRef.name`, to specify a reference to a Secret in the same namespace as + the OCIRepository, containing the Cosign public keys of trusted authors. For Notation this Secret should also + include the [trust policy](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/trust-store-trust-policy.md#trust-policy) in + addition to the CA certificate. +- `.matchOIDCIdentity`, to specify a list of OIDC identity matchers (only supported when using `cosign` as the + verification provider). Please see + [Keyless verification](#keyless-verification) for more details. + +#### Cosign + +The `cosign` provider can be used to verify the signature of an OCI artifact using either a known public key +or via the [Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + verify: + provider: cosign + secretRef: + name: cosign-public-keys +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the OCIRepository's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +##### Public keys verification + +To verify the authenticity of an OCI artifact, create a Kubernetes secret +with the Cosign public keys: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: cosign-public-keys +type: Opaque +data: + key1.pub: + key2.pub: +``` + +Note that the keys must have the `.pub` extension for Flux to make use of them. + +Flux will loop over the public keys and use them to verify an artifact's signature. +This allows for older artifacts to be valid as long as the right key is in the secret. + +##### Keyless verification + +For publicly available OCI artifacts, which are signed using the +[Cosign Keyless](https://github.com/sigstore/cosign/blob/main/KEYLESS.md) procedure, +you can enable the verification by omitting the `.verify.secretRef` field. + +To verify the identity's subject and the OIDC issuer present in the Fulcio +certificate, you can specify a list of OIDC identity matchers using +`.spec.verify.matchOIDCIdentity`. The matcher provides two required fields: + +- `.issuer`, to specify a regexp that matches against the OIDC issuer. +- `.subject`, to specify a regexp that matches against the subject identity in + the certificate. 
+Both values should follow the [Go regular expression syntax](https://golang.org/s/re2syntax). + +The matchers are evaluated in an OR fashion, i.e. the identity is deemed to be +verified if any one matcher successfully matches against the identity. + +Example of verifying artifacts signed by the +[Cosign GitHub Action](https://github.com/sigstore/cosign-installer) with GitHub OIDC Token: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: podinfo +spec: + interval: 5m + url: oci://ghcr.io/stefanprodan/manifests/podinfo + verify: + provider: cosign + matchOIDCIdentity: + - issuer: "^https://token.actions.githubusercontent.com$" + subject: "^https://github.com/stefanprodan/podinfo.*$" +``` + +The controller verifies the signatures using the Fulcio root CA and the Rekor +instance hosted at [rekor.sigstore.dev](https://rekor.sigstore.dev/). + +Note that keyless verification is an **experimental feature**, using +custom root CAs or self-hosted Rekor instances are not currently supported. + +#### Notation + +The `notation` provider can be used to verify the signature of an OCI artifact using known +trust policy and CA certificate. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + verify: + provider: notation + secretRef: + name: notation-config +``` + +When the verification succeeds, the controller adds a Condition with the +following attributes to the OCIRepository's `.status.conditions`: + +- `type: SourceVerified` +- `status: "True"` +- `reason: Succeeded` + +To verify the authenticity of an OCI artifact, create a Kubernetes secret +containing Certificate Authority (CA) root certificates and the a `trust policy` + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: notation-config +type: Opaque +data: + certificate1.pem: + certificate2.crt: + trustpolicy.json: +``` + +Note that the CA certificates must have either `.pem` or `.crt` extension and your trust policy must +be named `trustpolicy.json` for Flux to make use of them. + +For more information on the signing and verification process see [Signing and Verification Workflow](https://github.com/notaryproject/specifications/blob/v1.0.0/specs/signing-and-verification-workflow.md). + +Flux will loop over the certificates and use them to verify an artifact's signature. +This allows for older artifacts to be valid as long as the right certificate is in the secret. + +### Suspend + +`.spec.suspend` is an optional field to suspend the reconciliation of a +OCIRepository. When set to `true`, the controller will stop reconciling the +OCIRepository, and changes to the resource or in the OCI repository will not +result in a new Artifact. When the field is set to `false` or removed, it will +resume. + +## Working with OCIRepositories + +### Excluding files + +By default, files which match the [default exclusion rules](#default-exclusions) +are excluded while archiving the OCI repository contents as an Artifact. +It is possible to overwrite and/or overrule the default exclusions using +the [`.spec.ignore` field](#ignore). + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +spec: + ignore: | + # exclude all + /* + # include deploy dir + !/deploy + # exclude file extensions from deploy dir + /deploy/**/*.md + /deploy/**/*.txt +``` + +#### `.sourceignore` file + +Excluding files is possible by adding a `.sourceignore` file in the artifact. 
+The `.sourceignore` file follows [the `.gitignore` pattern
+format](https://git-scm.com/docs/gitignore#_pattern_format), and pattern
+entries may overrule [default exclusions](#default-exclusions).
+
+The controller recursively loads ignore files, so a `.sourceignore` can be
+placed in the artifact root or in subdirectories.
+
+### Triggering a reconcile
+
+To manually tell the source-controller to reconcile an OCIRepository outside the
+[specified interval window](#interval), an OCIRepository can be annotated with
+`reconcile.fluxcd.io/requestedAt: `. Annotating the resource
+queues the OCIRepository for reconciliation if the `` differs
+from the last value the controller acted on, as reported in
+[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).
+
+Using `kubectl`:
+
+```sh
+kubectl annotate --field-manager=flux-client-side-apply --overwrite ocirepository/ reconcile.fluxcd.io/requestedAt="$(date +%s)"
+```
+
+Using `flux`:
+
+```sh
+flux reconcile source oci 
+```
+
+### Waiting for `Ready`
+
+When a change is applied, it is possible to wait for the OCIRepository to reach
+a [ready state](#ready-ocirepository) using `kubectl`:
+
+```sh
+kubectl wait ocirepository/ --for=condition=ready --timeout=1m
+```
+
+### Suspending and resuming
+
+When you find yourself in a situation where you temporarily want to pause the
+reconciliation of an OCIRepository, you can suspend it using the
+[`.spec.suspend` field](#suspend).
+
+#### Suspend an OCIRepository
+
+In your YAML declaration:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+  name: 
+spec:
+  suspend: true
+```
+
+Using `kubectl`:
+
+```sh
+kubectl patch ocirepository --field-manager=flux-client-side-apply -p '{"spec": {"suspend": true}}'
+```
+
+Using `flux`:
+
+```sh
+flux suspend source oci 
+```
+
+**Note:** When an OCIRepository has an Artifact and it is suspended, and this
+Artifact later disappears from the storage due to e.g. the source-controller
+Pod being evicted from a Node, this will not be reflected in the
+OCIRepository's Status until it is resumed.
+
+#### Resume an OCIRepository
+
+In your YAML declaration, comment out (or remove) the field:
+
+```yaml
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+  name: 
+spec:
+  # suspend: true
+```
+
+**Note:** Setting the field value to `false` has the same effect as removing
+it, but does not allow for "hot patching" using e.g. `kubectl` while practicing
+GitOps, as the manually applied patch would be overwritten by the declared
+state in Git.
+
+Using `kubectl`:
+
+```sh
+kubectl patch ocirepository --field-manager=flux-client-side-apply -p '{"spec": {"suspend": false}}'
+```
+
+Using `flux`:
+
+```sh
+flux resume source oci 
+```
+
+### Debugging an OCIRepository
+
+There are several ways to gather information about an OCIRepository for
+debugging purposes.
+
+#### Describe the OCIRepository
+
+Describing an OCIRepository using
+`kubectl describe ocirepository `
+displays the latest recorded information for the resource in the `Status` and
+`Events` sections:
+
+```console
+...
+Status:
+...
+ Conditions: + Last Transition Time: 2025-02-14T09:40:27Z + Message: processing object: new generation 1 -> 2 + Observed Generation: 2 + Reason: ProgressingWithRetry + Status: True + Type: Reconciling + Last Transition Time: 2025-02-14T09:40:27Z + Message: failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" + Observed Generation: 2 + Reason: OCIOperationFailed + Status: False + Type: Ready + Last Transition Time: 2025-02-14T09:40:27Z + Message: failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" + Observed Generation: 2 + Reason: OCIOperationFailed + Status: True + Type: FetchFailed + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./ocirepository/default/podinfo/latest.tar.gz +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning OCIOperationFailed 2s (x9 over 4s) source-controller failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" +``` + +#### Trace emitted Events + +To view events for specific OCIRepository(s), `kubectl events` can be used +in combination with `--for` to list the Events for specific objects. For +example, running + +```sh +kubectl events --for OCIRepository/ +``` + +lists + +```console +LAST SEEN TYPE REASON OBJECT MESSAGE +2m14s Normal NewArtifact ocirepository/ stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' +36s Normal ArtifactUpToDate ocirepository/ artifact up-to-date with remote revision: 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de' +94s Warning OCIOperationFailed ocirepository/ failed to pull artifact from 'oci://ghcr.io/stefanprodan/manifests/podinfo': couldn't find tag "0.0.1" +``` + +Besides being reported in Events, the reconciliation errors are also logged by +the controller. The Flux CLI offer commands for filtering the logs for a +specific OCIRepository, e.g. +`flux logs --level=error --kind=OCIRepository --name=`. + +## OCIRepository Status + +### Artifact + +The OCIRepository reports the latest synchronized state from the OCI repository +as an Artifact object in the `.status.artifact` of the resource. + +The `.status.artifact.revision` holds the tag and SHA256 digest of the upstream OCI artifact. + +The `.status.artifact.metadata` holds the upstream OCI artifact metadata such as the +[OpenContainers standard annotations](https://github.com/opencontainers/image-spec/blob/main/annotations.md). +If the OCI artifact was created with `flux push artifact`, then the `metadata` will contain the following +annotations: +- `org.opencontainers.image.created` the date and time on which the artifact was built +- `org.opencontainers.image.source` the URL of the Git repository containing the source files +- `org.opencontainers.image.revision` the Git branch and commit SHA1 of the source files + +The Artifact file is a gzip compressed TAR archive (`.tar.gz`), and +can be retrieved in-cluster from the `.status.artifact.url` HTTP address. 
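+
+As a quick way to inspect what the controller is serving, the URL and digest
+can be read from the status, and the archive can be fetched from inside the
+cluster. A minimal sketch, assuming an OCIRepository named `podinfo` in the
+`flux-system` namespace and that running a throwaway `curlimages/curl` Pod is
+acceptable (all names are illustrative, not part of the API):
+
+```sh
+# Read the in-cluster URL and digest of the latest Artifact.
+kubectl -n flux-system get ocirepository podinfo \
+  -o jsonpath='{.status.artifact.url}{"\n"}{.status.artifact.digest}{"\n"}'
+
+# The URL only resolves inside the cluster; confirm the archive is retrievable
+# by fetching it from a temporary Pod and printing the HTTP status code.
+URL=$(kubectl -n flux-system get ocirepository podinfo -o jsonpath='{.status.artifact.url}')
+kubectl -n flux-system run curl-artifact --rm -i --restart=Never \
+  --image=curlimages/curl --command -- curl -sSL -o /dev/null -w '%{http_code}\n' "$URL"
+```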
+ +#### Artifact example + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: OCIRepository +metadata: + name: +status: + artifact: + digest: sha256:9f3bc0f341d4ecf2bab460cc59320a2a9ea292f01d7b96e32740a9abfd341088 + lastUpdateTime: "2025-08-08T09:35:45Z" + metadata: + org.opencontainers.image.created: "2025-08-08T12:31:41+03:00" + org.opencontainers.image.revision: 6.1.8/b3b00fe35424a45d373bf4c7214178bc36fd7872 + org.opencontainers.image.source: https://github.com/stefanprodan/podinfo.git + path: ocirepository///.tar.gz + revision: @ + size: 1105 + url: http://source-controller..svc.cluster.local./ocirepository///.tar.gz +``` + +#### Default exclusions + +The following files and extensions are excluded from the Artifact by +default: + +- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`) +- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`) +- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`) +- CLI configs (`.goreleaser.yml, .sops.yaml`) +- Flux v1 config (`.flux.yaml`) + +To define your own exclusion rules, see [excluding files](#excluding-files). + +### Conditions + +OCIRepository has various states during its lifecycle, reflected as +[Kubernetes Conditions][typical-status-properties]. +It can be [reconciling](#reconciling-ocirepository) while fetching the remote +state, it can be [ready](#ready-ocirepository), or it can [fail during +reconciliation](#failed-ocirepository). + +The OCIRepository API is compatible with the [kstatus specification][kstatus-spec], +and reports `Reconciling` and `Stalled` conditions where applicable to +provide better (timeout) support to solutions polling the OCIRepository to +become `Ready`. + +#### Reconciling OCIRepository + +The source-controller marks an OCIRepository as _reconciling_ when one of the +following is true: + +- There is no current Artifact for the OCIRepository, or the reported Artifact + is determined to have disappeared from the storage. +- The generation of the OCIRepository is newer than the [Observed + Generation](#observed-generation). +- The newly resolved Artifact digest differs from the current Artifact. + +When the OCIRepository is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the OCIRepository's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, an additional Condition is +added with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the OCIRepository while their status value is `"True"`. + +#### Ready OCIRepository + +The source-controller marks an OCIRepository as _ready_ when it has the +following characteristics: + +- The OCIRepository reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The controller was able to communicate with the remote OCI repository using + the current spec. +- The digest of the reported Artifact is up-to-date with the latest + resolved digest of the remote OCI repository. 
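+
+Put together, the `Ready` and `ArtifactInStorage` Conditions described below
+might appear on such an OCIRepository as follows (an illustrative sketch;
+timestamps, generation and revision are example values):
+
+```yaml
+status:
+  conditions:
+  - lastTransitionTime: "2025-02-14T09:40:27Z"
+    message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de'
+    observedGeneration: 1
+    reason: Succeeded
+    status: "True"
+    type: Ready
+  - lastTransitionTime: "2025-02-14T09:40:27Z"
+    message: stored artifact for revision 'latest@sha256:3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de'
+    observedGeneration: 1
+    reason: Succeeded
+    status: "True"
+    type: ArtifactInStorage
+```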
+
+When the OCIRepository is "ready", the controller sets a Condition with the
+following attributes in the OCIRepository's `.status.conditions`:
+
+- `type: Ready`
+- `status: "True"`
+- `reason: Succeeded`
+
+This `Ready` Condition will retain a status value of `"True"` until the
+OCIRepository is marked as [reconciling](#reconciling-ocirepository), or e.g. a
+[transient error](#failed-ocirepository) occurs due to a temporary network issue.
+
+When the OCIRepository Artifact is archived in the controller's Artifact
+storage, the controller sets a Condition with the following attributes in the
+OCIRepository's `.status.conditions`:
+
+- `type: ArtifactInStorage`
+- `status: "True"`
+- `reason: Succeeded`
+
+This `ArtifactInStorage` Condition will retain a status value of `"True"` until
+the Artifact in the storage no longer exists.
+
+#### Failed OCIRepository
+
+The source-controller may get stuck trying to produce an Artifact for an
+OCIRepository without completing. This can occur due to some of the following
+factors:
+
+- The remote OCI repository [URL](#url) is temporarily unavailable.
+- The OCI repository does not exist.
+- The [Secret reference](#secret-reference) contains a reference to a
+  non-existing Secret.
+- The credentials in the referenced Secret are invalid.
+- The OCIRepository spec contains a generic misconfiguration.
+- A storage-related failure occurred while storing the artifact.
+
+When this happens, the controller sets the `Ready` Condition status to `False`,
+and adds a Condition with the following attributes to the OCIRepository's
+`.status.conditions`:
+
+- `type: FetchFailed` | `type: IncludeUnavailable` | `type: StorageOperationFailed`
+- `status: "True"`
+- `reason: AuthenticationFailed` | `reason: OCIArtifactPullFailed` | `reason: OCIArtifactLayerOperationFailed`
+
+This condition has a ["negative polarity"][typical-status-properties],
+and is only present on the OCIRepository while the status value is `"True"`.
+The `reason` field may hold other values that describe the cause of a failure
+more precisely.
+
+In addition to the above Condition types, when the signature
+[verification](#verification) fails, a Condition with
+the following attributes is added to the OCIRepository's `.status.conditions`:
+
+- `type: SourceVerified`
+- `status: "False"`
+- `reason: VerificationError`
+
+While the OCIRepository has one or more of these Conditions, the controller
+will continue to attempt to produce an Artifact for the resource with an
+exponential backoff, until it succeeds and the OCIRepository is marked as
+[ready](#ready-ocirepository).
+
+Note that an OCIRepository can be [reconciling](#reconciling-ocirepository)
+while failing at the same time, for example due to a newly introduced
+configuration issue in the OCIRepository spec. When a reconciliation fails, the
+`Reconciling` Condition reason would be `ProgressingWithRetry`. When the
+reconciliation is performed again after the failure, the reason is updated to
+`Progressing`.
+
+### Observed Ignore
+
+The source-controller reports an observed ignore in the OCIRepository's
+`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value
+which resulted in a [ready state](#ready-ocirepository), or stalled due to an
+error it cannot recover from without human intervention. The value is the same
+as the [ignore in spec](#ignore). It indicates the ignore rules used in building
+the current artifact in storage. It is also used by the controller to determine
+if an artifact needs to be rebuilt.
+ +Example: +```yaml +status: + ... + observedIgnore: | + hpa.yaml + build + ... +``` + +### Observed Layer Selector + +The source-controller reports an observed layer selector in the OCIRepository's +`.status.observedLayerSelector`. The observed layer selector is the latest +`.spec.layerSelector` value which resulted in a [ready state](#ready-ocirepository), +or stalled due to error it can not recover from without human intervention. +The value is the same as the [layer selector in spec](#layer-selector). +It indicates the layer selection configuration used in building the current +artifact in storage. It is also used by the controller to determine if an +artifact needs to be rebuilt. + +Example: +```yaml +status: + ... + observedLayerSelector: + mediaType: application/vnd.cncf.helm.chart.content.v1.tar+gzip + operation: copy + ... +``` + +### Observed Generation + +The source-controller reports an [observed generation][typical-status-properties] +in the OCIRepository's `.status.observedGeneration`. The observed generation is +the latest `.metadata.generation` which resulted in either a [ready state](#ready-ocirepository), +or stalled due to error it can not recover from without human +intervention. + +### Last Handled Reconcile At + +The source-controller reports the last `reconcile.fluxcd.io/requestedAt` +annotation value it acted on in the `.status.lastHandledReconcileAt` field. + +For practical information about this field, see [triggering a +reconcile](#triggering-a-reconcile). + +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus +[image-pull-secrets]: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod +[image-auto-provider-secrets]: https://fluxcd.io/flux/guides/image-update/#imagerepository-cloud-providers-authentication +[pem-encoding]: https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail +[sops-guide]: https://fluxcd.io/flux/guides/mozilla-sops/ diff --git a/go.mod b/go.mod index ca4b76058..21c15753e 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/fluxcd/source-controller -go 1.23.0 +go 1.25.0 replace github.com/fluxcd/source-controller/api => ./api @@ -9,108 +9,108 @@ replace github.com/fluxcd/source-controller/api => ./api replace github.com/opencontainers/go-digest => github.com/opencontainers/go-digest v1.0.1-0.20220411205349-bde1400a84be require ( - cloud.google.com/go/compute/metadata v0.6.0 - cloud.google.com/go/storage v1.48.0 + cloud.google.com/go/compute/metadata v0.8.0 + cloud.google.com/go/storage v1.56.1 github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 - github.com/Masterminds/semver/v3 v3.3.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 + github.com/Masterminds/semver/v3 v3.4.0 github.com/cyphar/filepath-securejoin v0.4.1 - 
github.com/distribution/distribution/v3 v3.0.0-rc.2 - github.com/docker/cli v27.5.1+incompatible + github.com/distribution/distribution/v3 v3.0.0 + github.com/docker/cli v28.4.0+incompatible github.com/docker/go-units v0.5.0 - github.com/elazarl/goproxy v1.7.0 - github.com/fluxcd/cli-utils v0.36.0-flux.12 - github.com/fluxcd/pkg/apis/event v0.16.0 - github.com/fluxcd/pkg/apis/meta v1.10.0 - github.com/fluxcd/pkg/auth v0.9.0 - github.com/fluxcd/pkg/cache v0.7.0 - github.com/fluxcd/pkg/git v0.25.0 - github.com/fluxcd/pkg/git/gogit v0.25.0 - github.com/fluxcd/pkg/gittestserver v0.16.0 - github.com/fluxcd/pkg/helmtestserver v0.23.0 - github.com/fluxcd/pkg/lockedfile v0.5.0 - github.com/fluxcd/pkg/masktoken v0.6.0 - github.com/fluxcd/pkg/oci v0.45.0 - github.com/fluxcd/pkg/runtime v0.53.1 - github.com/fluxcd/pkg/sourceignore v0.11.0 - github.com/fluxcd/pkg/ssh v0.17.0 - github.com/fluxcd/pkg/tar v0.11.0 - github.com/fluxcd/pkg/testserver v0.10.0 - github.com/fluxcd/pkg/version v0.6.0 - github.com/fluxcd/source-controller/api v1.5.0 + github.com/elazarl/goproxy v1.7.2 + github.com/fluxcd/cli-utils v0.36.0-flux.15 + github.com/fluxcd/pkg/apis/event v0.19.0 + github.com/fluxcd/pkg/apis/meta v1.21.0 + github.com/fluxcd/pkg/artifact v0.3.0 + github.com/fluxcd/pkg/auth v0.31.0 + github.com/fluxcd/pkg/cache v0.11.0 + github.com/fluxcd/pkg/git v0.36.0 + github.com/fluxcd/pkg/git/gogit v0.40.0 + github.com/fluxcd/pkg/gittestserver v0.20.0 + github.com/fluxcd/pkg/helmtestserver v0.30.0 + github.com/fluxcd/pkg/http/transport v0.7.0 + github.com/fluxcd/pkg/masktoken v0.8.0 + github.com/fluxcd/pkg/oci v0.56.0 + github.com/fluxcd/pkg/runtime v0.84.0 + github.com/fluxcd/pkg/sourceignore v0.14.0 + github.com/fluxcd/pkg/ssh v0.21.0 + github.com/fluxcd/pkg/tar v0.14.0 + github.com/fluxcd/pkg/testserver v0.13.0 + github.com/fluxcd/pkg/version v0.10.0 + github.com/fluxcd/source-controller/api v1.7.0 github.com/foxcpp/go-mockdns v1.1.0 github.com/go-git/go-billy/v5 v5.6.2 - github.com/go-git/go-git/v5 v5.13.2 - github.com/go-logr/logr v1.4.2 - github.com/google/go-containerregistry v0.20.3 - github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20241111191718-6bce25ecf029 + github.com/go-git/go-git/v5 v5.16.2 + github.com/go-logr/logr v1.4.3 + github.com/google/go-containerregistry v0.20.6 + github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20250613215107-59a4b8593039 github.com/google/uuid v1.6.0 - github.com/minio/minio-go/v7 v7.0.87 - github.com/notaryproject/notation-core-go v1.2.0 - github.com/notaryproject/notation-go v1.3.0 - github.com/onsi/gomega v1.36.2 + github.com/minio/minio-go/v7 v7.0.94 + 
github.com/notaryproject/notation-core-go v1.3.0 + github.com/notaryproject/notation-go v1.3.2 + github.com/onsi/gomega v1.38.2 github.com/opencontainers/go-digest v1.0.0 - github.com/opencontainers/go-digest/blake3 v0.0.0-20240426182413-22b78e47854a - github.com/opencontainers/image-spec v1.1.0 - github.com/ory/dockertest/v3 v3.11.0 + github.com/opencontainers/image-spec v1.1.1 + github.com/ory/dockertest/v3 v3.12.0 github.com/otiai10/copy v1.14.1 github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 - github.com/prometheus/client_golang v1.21.0 - github.com/sigstore/cosign/v2 v2.4.1 - github.com/sigstore/sigstore v1.8.15 + github.com/prometheus/client_golang v1.23.0 + github.com/sigstore/cosign/v2 v2.5.2 + github.com/sigstore/sigstore v1.9.5 github.com/sirupsen/logrus v1.9.3 - github.com/spf13/pflag v1.0.6 - golang.org/x/crypto v0.34.0 - golang.org/x/oauth2 v0.26.0 - golang.org/x/sync v0.11.0 - google.golang.org/api v0.211.0 - gotest.tools v2.2.0+incompatible - helm.sh/helm/v3 v3.17.0 - k8s.io/api v0.32.1 - k8s.io/apimachinery v0.32.1 - k8s.io/client-go v0.32.1 - k8s.io/utils v0.0.0-20241210054802-24370beab758 - oras.land/oras-go/v2 v2.5.0 - sigs.k8s.io/controller-runtime v0.20.1 - sigs.k8s.io/yaml v1.4.0 + github.com/spf13/pflag v1.0.10 + golang.org/x/crypto v0.41.0 + golang.org/x/oauth2 v0.30.0 + golang.org/x/sync v0.16.0 + google.golang.org/api v0.248.0 + helm.sh/helm/v3 v3.19.0 + k8s.io/api v0.34.0 + k8s.io/apimachinery v0.34.0 + k8s.io/client-go v0.34.0 + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 + oras.land/oras-go/v2 v2.6.0 + sigs.k8s.io/controller-runtime v0.22.1 + sigs.k8s.io/yaml v1.6.0 ) require ( - cel.dev/expr v0.19.1 // indirect - cloud.google.com/go v0.116.0 // indirect - cloud.google.com/go/auth v0.12.1 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect - cloud.google.com/go/iam v1.2.2 // indirect - cloud.google.com/go/monitoring v1.21.2 // indirect + cel.dev/expr v0.24.0 // indirect + cloud.google.com/go v0.121.6 // indirect + cloud.google.com/go/auth v0.16.5 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect dario.cat/mergo v1.0.1 // indirect - filippo.io/edwards25519 v1.1.0 // indirect github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect - github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry v0.2.3 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.29 // indirect - github.com/Azure/go-autorest/autorest/adal 
v0.9.23 // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/Azure/go-autorest/autorest v0.11.30 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.7 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.1 // indirect + github.com/Azure/go-autorest/logger v0.2.2 // indirect + github.com/Azure/go-autorest/tracing v0.6.1 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.4.0 // indirect - github.com/BurntSushi/toml v1.4.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect + github.com/BurntSushi/toml v1.5.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Masterminds/squirrel v1.5.4 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect - github.com/ProtonMail/go-crypto v1.1.5 // indirect + github.com/ProtonMail/go-crypto v1.3.0 // indirect github.com/ThalesIgnite/crypto11 v1.2.5 // indirect github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect @@ -124,49 +124,50 @@ require ( github.com/alibabacloud-go/tea-xml v1.1.3 // indirect github.com/aliyun/credentials-go v1.3.2 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2 v1.35.0 // indirect - github.com/aws/aws-sdk-go-v2/config v1.29.3 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.56 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.26 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.30 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.30 // indirect - 
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ecr v1.40.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.5 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.11 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.13 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.12 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.11 // indirect - github.com/aws/smithy-go v1.22.2 // indirect - github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 // indirect + github.com/aws/aws-sdk-go-v2 v1.38.3 // indirect + github.com/aws/aws-sdk-go-v2/config v1.31.6 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.18.10 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.50.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.2 // indirect + github.com/aws/aws-sdk-go-v2/service/eks v1.73.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 // indirect + github.com/aws/smithy-go v1.23.0 // indirect + github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.9.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/bradleyfalzon/ghinstallation/v2 v2.14.0 // indirect + github.com/bradleyfalzon/ghinstallation/v2 v2.16.0 // indirect github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect - github.com/buildkite/agent/v3 v3.81.0 // indirect - github.com/buildkite/go-pipeline v0.13.1 // indirect - github.com/buildkite/interpolate v0.1.3 // indirect - github.com/buildkite/roko v1.2.0 // indirect + github.com/buildkite/agent/v3 v3.98.2 // indirect + github.com/buildkite/go-pipeline v0.13.3 // indirect + github.com/buildkite/interpolate v0.1.5 // indirect + github.com/buildkite/roko v1.3.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect 
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect github.com/clbanning/mxj/v2 v2.7.0 // indirect - github.com/cloudflare/circl v1.5.0 // indirect - github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect + github.com/cloudflare/circl v1.6.1 // indirect + github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect - github.com/containerd/containerd v1.7.24 // indirect - github.com/containerd/continuity v0.4.3 // indirect - github.com/containerd/errdefs v0.3.0 // indirect + github.com/containerd/containerd v1.7.28 // indirect + github.com/containerd/continuity v0.4.5 // indirect + github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect - github.com/coreos/go-oidc/v3 v3.12.0 // indirect + github.com/coreos/go-oidc/v3 v3.15.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect @@ -174,106 +175,103 @@ require ( github.com/dimchansky/utfbom v1.1.1 // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.5.0+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.2 // indirect + github.com/docker/docker v28.3.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/envoyproxy/go-control-plane v0.13.0 // indirect - github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect - github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/evanphx/json-patch v5.9.11+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect - github.com/fatih/color 
v1.17.0 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fluxcd/gitkit v0.6.0 // indirect - github.com/fluxcd/pkg/apis/acl v0.6.0 // indirect - github.com/fsnotify/fsnotify v1.8.0 // indirect - github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/fluxcd/pkg/apis/acl v0.9.0 // indirect + github.com/fluxcd/pkg/lockedfile v0.7.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect github.com/go-chi/chi v4.1.2+incompatible // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-ini/ini v1.67.0 // indirect - github.com/go-jose/go-jose/v3 v3.0.3 // indirect - github.com/go-jose/go-jose/v4 v4.0.4 // indirect + github.com/go-jose/go-jose/v3 v3.0.4 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect github.com/go-ldap/ldap/v3 v3.4.10 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/analysis v0.23.0 // indirect - github.com/go-openapi/errors v0.22.0 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/errors v0.22.1 // indirect + github.com/go-openapi/jsonpointer v0.21.1 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/loads v0.22.0 // indirect github.com/go-openapi/runtime v0.28.0 // indirect github.com/go-openapi/spec v0.21.0 // indirect github.com/go-openapi/strfmt v0.23.0 // indirect - github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect github.com/go-openapi/validate v0.24.0 // indirect - github.com/go-piv/piv-go v1.11.0 // indirect - github.com/go-viper/mapstructure/v2 v2.1.0 // indirect + github.com/go-piv/piv-go/v2 v2.3.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/goccy/go-json v0.10.5 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.1 // indirect - github.com/golang-jwt/jwt/v5 v5.2.1 // indirect + github.com/golang-jwt/jwt/v4 v4.5.2 // indirect + github.com/golang-jwt/jwt/v5 v5.3.0 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/certificate-transparency-go v1.2.1 // indirect - github.com/google/gnostic-models v0.6.9 // indirect + 
github.com/google/certificate-transparency-go v1.3.2 // indirect + github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa // indirect - github.com/google/go-github/v55 v55.0.0 // indirect - github.com/google/go-github/v69 v69.2.0 // indirect + github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20250225234217-098045d5e61f // indirect + github.com/google/go-github/v72 v72.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/s2a-go v0.1.8 // indirect + github.com/google/s2a-go v0.1.9 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect - github.com/googleapis/gax-go/v2 v2.14.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect + github.com/googleapis/gax-go/v2 v2.15.0 // indirect github.com/gorilla/handlers v1.5.2 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/gorilla/websocket v1.5.3 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gosuri/uitable v0.0.4 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-retryablehttp v0.7.8 // indirect github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect - github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/huandu/xstrings v1.5.0 // indirect + github.com/in-toto/attestation v1.1.1 // indirect github.com/in-toto/in-toto-golang v0.9.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmoiron/sqlx v1.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/compress v1.17.11 // indirect - github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect 
github.com/kylelemons/godebug v1.1.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.9.0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect github.com/miekg/dns v1.1.58 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/minio/crc64nvme v1.0.1 // indirect @@ -281,15 +279,14 @@ require ( github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/locker v1.0.1 // indirect github.com/moby/spdystream v0.5.0 // indirect github.com/moby/sys/user v0.3.0 // indirect - github.com/moby/term v0.5.0 // indirect + github.com/moby/term v0.5.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/mozillazg/docker-credential-acr-helper v0.4.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -299,128 +296,134 @@ require ( github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/oleiade/reflections v1.1.0 // indirect + github.com/opencontainers/go-digest/blake3 v0.0.0-20250813155314-89707e38ad1a // indirect github.com/opencontainers/runc v1.2.4 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/otiai10/mint v1.6.3 // indirect github.com/pborman/uuid v1.2.1 // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect - github.com/pjbgf/sha1cd v0.3.2 // indirect + github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect + github.com/pjbgf/sha1cd v0.4.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect 
github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/otlptranslator v0.0.2 // indirect + github.com/prometheus/procfs v0.17.0 // indirect github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 // indirect - github.com/redis/go-redis/v9 v9.7.0 // indirect - github.com/rivo/uniseg v0.4.4 // indirect + github.com/redis/go-redis/v9 v9.8.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect github.com/rs/xid v1.6.0 // indirect - github.com/rubenv/sql-migrate v1.7.1 // indirect + github.com/rubenv/sql-migrate v1.8.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sagikazarmark/locafero v0.7.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect github.com/sassoftware/relic v7.2.1+incompatible // indirect github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect github.com/segmentio/ksuid v1.0.4 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect - github.com/sigstore/fulcio v1.6.3 // indirect - github.com/sigstore/protobuf-specs v0.4.0 // indirect - github.com/sigstore/rekor v1.3.6 // indirect - github.com/sigstore/timestamp-authority v1.2.2 // indirect + github.com/sigstore/fulcio v1.7.1 // indirect + github.com/sigstore/protobuf-specs v0.4.3 // indirect + github.com/sigstore/rekor v1.3.10 // indirect + github.com/sigstore/sigstore-go v1.0.0 // indirect + github.com/sigstore/timestamp-authority v1.2.8 // indirect github.com/skeema/knownhosts v1.3.1 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect - github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/viper v1.19.0 // indirect - github.com/spiffe/go-spiffe/v2 v2.3.0 // indirect + github.com/spf13/afero v1.12.0 // indirect + github.com/spf13/cast v1.7.1 // indirect + github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/viper v1.20.1 // indirect + github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect github.com/subosito/gotenv v1.6.0 
// indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/thales-e-security/pool v0.0.2 // indirect github.com/theupdateframework/go-tuf v0.7.0 // indirect + github.com/theupdateframework/go-tuf/v2 v2.1.1 // indirect + github.com/tinylib/msgp v1.3.0 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/tjfoc/gmsm v1.4.1 // indirect github.com/transparency-dev/merkle v0.0.2 // indirect - github.com/vbatts/tar-split v0.11.6 // indirect + github.com/vbatts/tar-split v0.12.1 // indirect github.com/veraison/go-cose v1.3.0 // indirect github.com/x448/float16 v0.8.4 // indirect - github.com/xanzy/go-gitlab v0.109.0 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect github.com/zeebo/blake3 v0.2.3 // indirect - github.com/zeebo/errs v1.3.0 // indirect + github.com/zeebo/errs v1.4.0 // indirect + gitlab.com/gitlab-org/api/client-go v0.130.1 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect - go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect - go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect - go.opentelemetry.io/otel v1.34.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 // indirect - go.opentelemetry.io/otel/log v0.8.0 // indirect - go.opentelemetry.io/otel/metric v1.34.0 // indirect - go.opentelemetry.io/otel/sdk v1.33.0 // indirect - go.opentelemetry.io/otel/sdk/log v0.8.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.34.0 // indirect - go.opentelemetry.io/proto/otlp v1.4.0 // indirect - go.step.sm/crypto v0.51.2 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect + go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 // 
indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.60.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect + go.opentelemetry.io/otel/log v0.14.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.14.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.8.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/mod v0.22.0 // indirect - golang.org/x/net v0.35.0 // indirect - golang.org/x/sys v0.30.0 // indirect - golang.org/x/term v0.29.0 // indirect - golang.org/x/text v0.22.0 // indirect - golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.29.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/grpc v1.68.1 // indirect - google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a // indirect - google.golang.org/protobuf v1.36.4 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/term v0.34.0 // indirect + golang.org/x/text v0.28.0 // indirect + golang.org/x/time v0.12.0 // indirect + golang.org/x/tools v0.36.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect + google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/grpc v1.75.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.32.1 // indirect - k8s.io/apiserver v0.32.1 // indirect - 
k8s.io/cli-runtime v0.32.1 // indirect - k8s.io/component-base v0.32.1 // indirect + k8s.io/apiextensions-apiserver v0.34.0 // indirect + k8s.io/apiserver v0.34.0 // indirect + k8s.io/cli-runtime v0.34.0 // indirect + k8s.io/component-base v0.34.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect - k8s.io/kubectl v0.32.1 // indirect - oras.land/oras-go v1.2.5 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/kubectl v0.34.0 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/kustomize/api v0.19.0 // indirect - sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect - sigs.k8s.io/release-utils v0.8.4 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect + sigs.k8s.io/kustomize/api v0.20.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/release-utils v0.11.1 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) retract v0.32.0 // Refers to incorrect ./api version. diff --git a/go.sum b/go.sum index 54278e3fc..369cd9509 100644 --- a/go.sum +++ b/go.sum @@ -1,32 +1,32 @@ -cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= -cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= -cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= -cloud.google.com/go/auth v0.12.1 h1:n2Bj25BUMM0nvE9D2XLTiImanwZhO3DkfWSYS/SAJP4= -cloud.google.com/go/auth v0.12.1/go.mod h1:BFMu+TNpF3DmvfBO9ClqTR/SiqVIm7LukKF9mbendF4= -cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= -cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= -cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA= -cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= -cloud.google.com/go/kms v1.20.1 h1:og29Wv59uf2FVaZlesaiDAqHFzHaoUyHI3HYp9VUHVg= -cloud.google.com/go/kms v1.20.1/go.mod h1:LywpNiVCvzYNJWS9JUcGJSVTNSwPwi0vBAotzDqn2nc= -cloud.google.com/go/logging v1.12.0 h1:ex1igYcGFd4S/RZWOCU51StlIEuey5bjqwH9ZYjHibk= -cloud.google.com/go/logging v1.12.0/go.mod h1:wwYBt5HlYP1InnrtYI0wtwttpVU1rifnMT7RejksUAM= -cloud.google.com/go/longrunning v0.6.2 h1:xjDfh1pQcWPEvnfjZmwjKQEcHnpz6lHjfy7Fo0MK+hc= -cloud.google.com/go/longrunning v0.6.2/go.mod h1:k/vIs83RN4bE3YCswdXC5PFfWVILjm3hpEUlSko4PiI= -cloud.google.com/go/monitoring v1.21.2 h1:FChwVtClH19E7pJ+e0xUhJPGksctZNVOk2UhMmblmdU= -cloud.google.com/go/monitoring v1.21.2/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU= -cloud.google.com/go/storage v1.48.0 h1:FhBDHACbVtdPx7S/AbcKujPWiHvfO6F8OXGgCEbB2+o= -cloud.google.com/go/storage v1.48.0/go.mod h1:aFoDYNMAjv67lp+xcuZqjUKv/ctmplzQ3wJgodA7b+M= -cloud.google.com/go/trace v1.11.2 h1:4ZmaBdL8Ng/ajrgKqY5jfvzqMXbrDcBsUGXOT9aqTtI= -cloud.google.com/go/trace v1.11.2/go.mod h1:bn7OwXd4pd5rFuAnTrzBuoZ4ax2XQeG3qNgYmfCy0Io= 
-cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2 h1:BnG6pr9TTr6CYlrJznYUDj6V7xldD1W+1iXPum0wT/w= -cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2/go.mod h1:pK23AUVXuNzzTpfMCA06sxZGeVQ/75FdVtW249de9Uo= -cuelang.org/go v0.9.2 h1:pfNiry2PdRBr02G/aKm5k2vhzmqbAOoaB4WurmEbWvs= -cuelang.org/go v0.9.2/go.mod h1:qpAYsLOf7gTM1YdEg6cxh553uZ4q9ZDWlPbtZr9q1Wk= +cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= +cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= +cloud.google.com/go/auth v0.16.5 h1:mFWNQ2FEVWAliEQWpAdH80omXFokmrnbDhUS9cBywsI= +cloud.google.com/go/auth v0.16.5/go.mod h1:utzRfHMP+Vv0mpOkTRQoWD2q3BatTOoWbA7gCc2dUhQ= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.8.0 h1:HxMRIbao8w17ZX6wBnjhcDkW6lTFpgcaobyVfZWqRLA= +cloud.google.com/go/compute/metadata v0.8.0/go.mod h1:sYOGTp851OV9bOFJ9CH7elVvyzopvWQFNNghtDQ/Biw= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/kms v1.22.0 h1:dBRIj7+GDeeEvatJeTB19oYZNV0aj6wEqSIT/7gLqtk= +cloud.google.com/go/kms v1.22.0/go.mod h1:U7mf8Sva5jpOb4bxYZdtw/9zsbIjrklYwPcvMk34AL8= +cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= +cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= +cloud.google.com/go/storage v1.56.1 h1:n6gy+yLnHn0hTwBFzNn8zJ1kqWfR91wzdM8hjRF4wP0= +cloud.google.com/go/storage v1.56.1/go.mod h1:C9xuCZgFl3buo2HZU/1FncgvvOgTAs/rnh4gF4lMg0s= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= +cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= +cuelabs.dev/go/oci/ociregistry v0.0.0-20241125120445-2c00c104c6e1 h1:mRwydyTyhtRX2wXS3mqYWzR2qlv6KsmoKXmlz5vInjg= +cuelabs.dev/go/oci/ociregistry v0.0.0-20241125120445-2c00c104c6e1/go.mod h1:5A4xfTzHTXfeVJBU6RAUf+QrlfTCW+017q/QiW+sMLg= +cuelang.org/go v0.12.1 h1:5I+zxmXim9MmiN2tqRapIqowQxABv2NKTgbOspud1Eo= +cuelang.org/go v0.12.1/go.mod h1:B4+kjvGGQnbkz+GuAv1dq/R308gTkp0sO28FdMrJ2Kw= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= @@ -39,72 +39,79 @@ github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0/go.mod h1:tlqp9mUGbsP+0z3Q+c0Q5MgSdq/OMwQhm5bffR3Q3ss= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 
h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0 h1:ci6Yd6nysBRLEodoziB6ah1+YOzZbZk+NYneoA6q+6E= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.0/go.mod h1:QyVsSSN64v5TGltphKLQ2sQxe4OBQg0J1eKRcVBnfgE= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 h1:MhRfI58HblXzCtWEZCO0feHs8LweePB3s90r7WaR1KU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0/go.mod h1:okZ+ZURbArNdlJ+ptXoyHNuOETzOl1Oww19rm8I2WLA= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 h1:DRiANoJTiW6obBQe3SqZizkuV1PEgfiiGivmVocDy64= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0/go.mod h1:qLIye2hwb/ZouqhpSD9Zn3SJipvpEnz1Ywl3VUk9Y0s= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry v0.2.3 h1:ldKsKtEIblsgsr6mPwrd9yRntoX6uLz/K89wsldwx/k= +github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry v0.2.3/go.mod h1:MAm7bk0oDLmD8yIkvfbxPW04fxzphPyL+7GzwHxOp6Y= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice v1.0.0 h1:figxyQZXzZQIcP3njhC68bYUiTw45J8/SsHaLW8Ax0M= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice 
v1.0.0/go.mod h1:TmlMW4W5OvXOmOyKNnor8nlMMiO1ctIyzmHme/VHsrA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1 h1:Wgf5rZba3YZqeTNJPtvqZoBu1sBN/L4sry+u2U3Y75w= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.3.1/go.mod h1:xxCBG/f/4Vbmh2XQJBsOmNdxWUY5j/s27jujKPbQf14= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1 h1:bFWuoEKg+gImo7pvkiQEFAc8ocibADgXeiLAxWhWmkI= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.1/go.mod h1:Vih/3yc6yac2JzU4hzpaDupBJP0Flaia9rXXrU8xyww= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 h1:FwladfywkNirM+FZYLBR2kBz5C8Tg0fw5w5Y7meRXWI= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2/go.mod h1:vv5Ad0RrIoT1lJFdWBZwt4mB1+j+V8DUroixmKDTCdk= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= -github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= -github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= +github.com/Azure/go-autorest/autorest v0.11.30 h1:iaZ1RGz/ALZtN5eq4Nr1SOFSlf2E4pDI3Tcsl+dZPVE= +github.com/Azure/go-autorest/autorest v0.11.30/go.mod h1:t1kpPIOpIVX7annvothKvb0stsrXa37i7b+xpmBW8Fs= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= -github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= -github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= +github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= 
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.7 h1:Q9R3utmFg9K1B4OYtAZ7ZUUvIUdzQt7G2MN5Hi/d670= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.7/go.mod h1:bVrAueELJ0CKLBpUHDIvD516TwmHmzqwCpvONWRsw3s= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/date v0.3.1 h1:o9Z8Jyt+VJJTCZ/UORishuHOusBwolhjokt9s5k8I4w= +github.com/Azure/go-autorest/autorest/date v0.3.1/go.mod h1:Dz/RDmXlfiFFS/eW+b/xMUSFs1tboPVy6UjgADToWDM= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/logger v0.2.2 h1:hYqBsEBywrrOSW24kkOCXRcKfKhK76OzLTfF+MYDE2o= +github.com/Azure/go-autorest/logger v0.2.2/go.mod h1:I5fg9K52o+iuydlWfa9T5K6WFos9XYr9dYTFzpqgibw= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/Azure/go-autorest/tracing v0.6.1 h1:YUMSrC/CeD1ZnnXcNYU4a/fzsO35u2Fsful9L/2nyR0= +github.com/Azure/go-autorest/tracing v0.6.1/go.mod h1:/3EgjbsjraOqiicERAeu3m7/z0x1TzjQGAwDrJrXGkc= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.4.0 h1:MUkXAnvvDHgvPItl0nBj0hgk0f7hnnQbGm0h0+YxbN4= -github.com/AzureAD/microsoft-authentication-library-for-go v1.4.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.4.0 
h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= -github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= 
-github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= @@ -112,18 +119,14 @@ github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA4 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Microsoft/hcsshim v0.11.7 h1:vl/nj3Bar/CvJSYo7gIQPyRWc9f3c6IeSNavBTSZNZQ= -github.com/Microsoft/hcsshim v0.11.7/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= -github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= -github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/ProtonMail/go-crypto v1.1.5 h1:eoAQfK2dwL+tFSFpr7TbOaPNUbPiJj4fLYwwGE1FQO4= -github.com/ProtonMail/go-crypto v1.1.5/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw= +github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= -github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= -github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= +github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM= +github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= @@ -178,42 +181,44 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= 
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= -github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.35.0 h1:jTPxEJyzjSuuz0wB+302hr8Eu9KUI+Zv8zlujMGJpVI= -github.com/aws/aws-sdk-go-v2 v1.35.0/go.mod h1:JgstGg0JjWU1KpVJjD5H0y0yyAIpSdKEq556EI6yOOM= -github.com/aws/aws-sdk-go-v2/config v1.29.3 h1:a5Ucjxe6iV+LHEBmYA9w40rT5aGxWybx/4l/O/fvJlE= -github.com/aws/aws-sdk-go-v2/config v1.29.3/go.mod h1:pt9z1x12zDiDb4iFLrxoeAKLVCU/Gp9DL/5BnwlY77o= -github.com/aws/aws-sdk-go-v2/credentials v1.17.56 h1:JKMBreKudV+ozx6rZJLvEtiexv48aEdhdC7mXUw9MLs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.56/go.mod h1:S3xRjIHD8HHFgMTz4L56q/7IldfNtGL9JjH/vP3U6DA= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.26 h1:XMBqBEuZLf8yxtH+mU/uUDyQbN4iD/xv9h6he2+lzhw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.26/go.mod h1:d0+wQ/3CYGPuHEfBTPpQdfUX7gjk0/Lxs5Q6KzdEGY8= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.30 h1:+7AzSGNhHoY53di13lvztf9Dyd/9ofzoYGBllkWp3a0= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.30/go.mod h1:Jxd/FrCny99yURiQiMywgXvBhd7tmgdv6KdlUTNzMSo= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.30 h1:Ex06eY6I5rO7IX0HalGfa5nGjpBoOsS1Qm3xfjkuszs= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.30/go.mod h1:AvyEMA9QcX59kFhVizBpIBpEMThUTXssuJe+emBdcGM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/service/ecr v1.40.0 h1:xRfaDubEUjVjKVUS9zJ5bE/L2EtEZ0eGP/tu2qFRXjU= -github.com/aws/aws-sdk-go-v2/service/ecr v1.40.0/go.mod h1:Qs6VY+BqNhwfLzphJGPVUGz/VnFkQBt7T4C2GB357+s= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.5 h1:PQp21GBlGNaQ+AVJAB8w2KTmLx0DkFS2fDET2Iy3+f0= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.5/go.mod h1:WMntdAol8KgeYsa5sDZPsRTXs4jVZIMYu0eQVVIQxnc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.11 h1:5JKQ2J3BBW4ovy6A/5Lwx9SpA6IzgH8jB3bquGZ1NUw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.11/go.mod h1:VShCk7rfCzK/b9U1aSkzLwcOoaDlYna16482QqEavis= -github.com/aws/aws-sdk-go-v2/service/kms v1.35.7 h1:v0D1LeMkA/X+JHAZWERrr+sUGOt8KrCZKnJA6KszkcE= -github.com/aws/aws-sdk-go-v2/service/kms v1.35.7/go.mod h1:K9lwD0Rsx9+NSaJKsdAdlDK4b2G4KKOEve9PzHxPoMI= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.13 h1:q4pOAKxypbFoUJzOpgo939bF50qb4DgYshiDfcsdN0M= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.13/go.mod h1:G/0PTg7+vQT42ictQGjJhixzTcVZtHFvrN/OeTXrRfQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.12 
h1:4sGSGshSSfO1vrcXruPick3ioSf8nhhD6nuB2ni37P4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.12/go.mod h1:NHpu/pLOelViA4qxkAFH10VLqh+XeLhZfXDaFyMVgSs= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.11 h1:RIXOjp7Dp4siCYJRwBHUcBdVgOWflSJGlq4ZhMI5Ta0= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.11/go.mod h1:ZR17k9bPKPR8u0IkyA6xVsjr56doNQ4ZB1fs7abYBfE= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 h1:SoFYaT9UyGkR0+nogNyD/Lj+bsixB+SNuAS4ABlEs6M= -github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8/go.mod h1:2JF49jcDOrLStIXN/j/K1EKRq8a8R2qRnlZA6/o/c7c= +github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE= +github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.38.3 h1:B6cV4oxnMs45fql4yRH+/Po/YU+597zgWqvDpYMturk= +github.com/aws/aws-sdk-go-v2 v1.38.3/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/config v1.31.6 h1:a1t8fXY4GT4xjyJExz4knbuoxSCacB5hT/WgtfPyLjo= +github.com/aws/aws-sdk-go-v2/config v1.31.6/go.mod h1:5ByscNi7R+ztvOGzeUaIu49vkMk2soq5NaH5PYe33MQ= +github.com/aws/aws-sdk-go-v2/credentials v1.18.10 h1:xdJnXCouCx8Y0NncgoptztUocIYLKeQxrCgN6x9sdhg= +github.com/aws/aws-sdk-go-v2/credentials v1.18.10/go.mod h1:7tQk08ntj914F/5i9jC4+2HQTAuJirq7m1vZVIhEkWs= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 h1:wbjnrrMnKew78/juW7I2BtKQwa1qlf6EjQgS69uYY14= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6/go.mod h1:AtiqqNrDioJXuUgz3+3T0mBWN7Hro2n9wll2zRUc0ww= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 h1:uF68eJA6+S9iVr9WgX1NaRGyQ/6MdIyc4JNUo6TN1FA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6/go.mod h1:qlPeVZCGPiobx8wb1ft0GHT5l+dc6ldnwInDFaMvC7Y= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 h1:pa1DEC6JoI0zduhZePp3zmhWvk/xxm4NB8Hy/Tlsgos= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6/go.mod h1:gxEjPebnhWGJoaDdtDkA0JX46VRg1wcTHYe63OfX5pE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= +github.com/aws/aws-sdk-go-v2/service/ecr v1.50.1 h1:lcwFjRx3C/hBxJzoWkD6DIG2jeB+mzLmFVBFVOadxxE= +github.com/aws/aws-sdk-go-v2/service/ecr v1.50.1/go.mod h1:qt9OL5kXqWoSub4QAkOF74mS3M2zOTNxMODqgwEUjt8= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.2 h1:EfatDVSMFxaS5TiR0C0zssQU1Nm+rGx3VbUGIH1y274= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.37.2/go.mod h1:oRy1IEgzXtOkEk4B/J7HZbXUC258drDLtkmc++lN7IA= +github.com/aws/aws-sdk-go-v2/service/eks v1.73.1 h1:Txq5jxY/ao+2Vx/kX9+65WTqkzCnxSlXnwIj+Cr/fng= +github.com/aws/aws-sdk-go-v2/service/eks v1.73.1/go.mod h1:+hYFg3laewH0YCfJRv+o5R3bradDKmFIm/uaiaD1U7U= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 
h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 h1:LHS1YAIJXJ4K9zS+1d/xa9JAA9sL2QyXIQCQFQW/X08= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6/go.mod h1:c9PCiTEuh0wQID5/KqA32J+HAgZxN9tOGXKCiYJjTZI= +github.com/aws/aws-sdk-go-v2/service/kms v1.41.0 h1:2jKyib9msVrAVn+lngwlSplG13RpUZmzVte2yDao5nc= +github.com/aws/aws-sdk-go-v2/service/kms v1.41.0/go.mod h1:RyhzxkWGcfixlkieewzpO3D4P4fTMxhIDqDZWsh0u/4= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 h1:8OLZnVJPvjnrxEwHFg9hVUof/P4sibH+Ea4KKuqAGSg= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.1/go.mod h1:27M3BpVi0C02UiQh1w9nsBEit6pLhlaH3NHna6WUbDE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 h1:gKWSTnqudpo8dAxqBqZnDoDWCiEh/40FziUjr/mo6uA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2/go.mod h1:x7+rkNmRoEN1U13A6JE2fXne9EWyJy54o3n6d4mGaXQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 h1:YZPjhyaGzhDQEvsffDEcpycq49nl7fiGcfJTIo8BszI= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.2/go.mod h1:2dIN8qhQfv37BdUYGgEC8Q3tteM3zFxTI1MLO2O3J3c= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.9.1 h1:50sS0RWhGpW/yZx2KcDNEb1u1MANv5BMEkJgcieEDTA= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.9.1/go.mod h1:ErZOtbzuHabipRTDTor0inoRlYwbsV1ovwSxjGs/uJo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -222,8 +227,8 @@ github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdn github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bradleyfalzon/ghinstallation/v2 v2.14.0 h1:0D4vKCHOvYrDU8u61TnE2JfNT4VRrBLphmxtqazTO+M= -github.com/bradleyfalzon/ghinstallation/v2 v2.14.0/go.mod h1:LOVmdZYVZ8jqdr4n9wWm1ocDiMz9IfMGfRkaYC1a52A= +github.com/bradleyfalzon/ghinstallation/v2 v2.16.0 h1:B91r9bHtXp/+XRgS5aZm6ZzTdz3ahgJYmkt4xZkgDz8= +github.com/bradleyfalzon/ghinstallation/v2 v2.16.0/go.mod h1:OeVe5ggFzoBnmgitZe/A+BqGOnv1DvU/0uiLQi1wutM= github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w= @@ -232,21 +237,19 @@ github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdb 
github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= -github.com/buildkite/agent/v3 v3.81.0 h1:JVfkng2XnsXesFXwiFwLJFkuzVu4zvoJCvedfoIXD6E= -github.com/buildkite/agent/v3 v3.81.0/go.mod h1:edJeyycODRxaFvpT22rDGwaQ5oa4eB8GjtbjgX5VpFw= -github.com/buildkite/go-pipeline v0.13.1 h1:Y9p8pQIwPtauVwNrcmTDH6+XK7jE1nLuvWVaK8oymA8= -github.com/buildkite/go-pipeline v0.13.1/go.mod h1:2HHqlSFTYgHFhzedJu0LhLs9n5c9XkYnHiQFVN5HE4U= -github.com/buildkite/interpolate v0.1.3 h1:OFEhqji1rNTRg0u9DsSodg63sjJQEb1uWbENq9fUOBM= -github.com/buildkite/interpolate v0.1.3/go.mod h1:UNVe6A+UfiBNKbhAySrBbZFZFxQ+DXr9nWen6WVt/A8= -github.com/buildkite/roko v1.2.0 h1:hbNURz//dQqNl6Eo9awjQOVOZwSDJ8VEbBDxSfT9rGQ= -github.com/buildkite/roko v1.2.0/go.mod h1:23R9e6nHxgedznkwwfmqZ6+0VJZJZ2Sg/uVcp2cP46I= -github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= -github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/buildkite/agent/v3 v3.98.2 h1:VOOxv8XD8HVCtEvtRPQhvB6k2Gorha2gN1wGh94gYAA= +github.com/buildkite/agent/v3 v3.98.2/go.mod h1:+zCvvo/OlOwfs+AH3QvSn37H3cBXP3Fe18eoSbqUvnY= +github.com/buildkite/go-pipeline v0.13.3 h1:llI7sAdZ7sqYE7r8ePlmDADRhJ1K0Kua2+gv74Z9+Es= +github.com/buildkite/go-pipeline v0.13.3/go.mod h1:1uC2XdHkTV1G5jYv9K8omERIwrsYbBruBrPx1Zu1uFw= +github.com/buildkite/interpolate v0.1.5 h1:v2Ji3voik69UZlbfoqzx+qfcsOKLA61nHdU79VV+tPU= +github.com/buildkite/interpolate v0.1.5/go.mod h1:dHnrwHew5O8VNOAgMDpwRlFnhL5VSN6M1bHVmRZ9Ccc= +github.com/buildkite/roko v1.3.1 h1:t7K30ceLLYn6k7hQP4oq1c7dVlhgD5nRcuSRDEEnY1s= +github.com/buildkite/roko v1.3.1/go.mod h1:23R9e6nHxgedznkwwfmqZ6+0VJZJZ2Sg/uVcp2cP46I= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= -github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -261,44 +264,42 @@ github.com/clbanning/mxj/v2 v2.5.5/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn github.com/clbanning/mxj/v2 v2.7.0 h1:WA/La7UGCanFe5NpHF0Q3DNtnCsVoxbPKuyBNHWRyME= github.com/clbanning/mxj/v2 v2.7.0/go.mod 
h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.5.0 h1:hxIWksrX6XN5a1L2TI/h53AGPhNHoUBo+TD1ms9+pys= -github.com/cloudflare/circl v1.5.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= +github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= +github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= -github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls= +github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg= github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ= github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w= -github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= -github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/containerd/containerd v1.7.24 h1:zxszGrGjrra1yYJW/6rhm9cJ1ZQ8rkKBR48brqsa7nA= -github.com/containerd/containerd v1.7.24/go.mod h1:7QUzfURqZWCZV7RLNEn1XjUCQLEf0bkaK4GjUaZehxw= -github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= -github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= -github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= -github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/containerd v1.7.28 h1:Nsgm1AtcmEh4AHAJ4gGlNSaKgXiNccU270Dnf81FQ3c= +github.com/containerd/containerd v1.7.28/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= +github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= +github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod 
h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= -github.com/coreos/go-oidc/v3 v3.12.0 h1:sJk+8G2qq94rDI6ehZ71Bol3oUHy63qNYmkiSjrc/Jo= -github.com/coreos/go-oidc/v3 v3.12.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= +github.com/coreos/go-oidc/v3 v3.15.0 h1:R6Oz8Z4bqWR7VFQ+sPSvZPQv4x8M+sJkDO5ojgwlyAg= +github.com/coreos/go-oidc/v3 v3.15.0/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.19 h1:tUN6H7LWqNx4hQVxomd0CVsDwaDr9gaRQaI4GpSmrsA= github.com/creack/pty v1.1.19/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc= -github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= -github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= -github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= +github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0= +github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -314,18 +315,20 @@ github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1G github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= 
-github.com/distribution/distribution/v3 v3.0.0-rc.2 h1:tTrzntanYMbd20SyvdeR83Ya1l/aBwDcA3NCIpmwemc= -github.com/distribution/distribution/v3 v3.0.0-rc.2/go.mod h1:H2zIRRXS20ylnv2HTuKILAWuANjuA60GB7MLOsQag7Y= +github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= +github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY= -github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY= +github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.5.0+incompatible h1:um++2NcQtGRTz5eEgO6aJimo6/JxrTXC941hd05JO6U= -github.com/docker/docker v27.5.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= -github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= +github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= @@ -334,76 +337,81 @@ github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/dustin/go-humanize v1.0.1 
h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/elazarl/goproxy v1.7.0 h1:EXv2nV4EjM60ZtsEVLYJG4oBXhDGutMKperpHsZ/v+0= -github.com/elazarl/goproxy v1.7.0/go.mod h1:X/5W/t+gzDyLfHW4DrMdpjqYjpXsURlBt9lpBDxZZZQ= -github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= -github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/proto v1.12.1 h1:6n/Z2pZAnBwuhU66Gs8160B8rrrYKo7h2F2sCOnNceE= -github.com/emicklei/proto v1.12.1/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= +github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= +github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/proto v1.13.4 h1:myn1fyf8t7tAqIzV91Tj9qXpvyXXGXk8OS2H6IBSc9g= +github.com/emicklei/proto v1.13.4/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= +github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= -github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= -github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= -github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/envoyproxy/protoc-gen-validate v1.2.1 
h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= +github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= -github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= -github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fluxcd/cli-utils v0.36.0-flux.12 h1:8cD6SmaKa/lGo0KCu0XWiGrXJMLMBQwSsnoP0cG+Gjw= -github.com/fluxcd/cli-utils v0.36.0-flux.12/go.mod h1:Nb/zMqsJAzjz4/HIsEc2LTqxC6eC0rV26t4hkJT/F9o= +github.com/fluxcd/cli-utils v0.36.0-flux.15 h1:Et5QLnIpRjj+oZtM9gEybkAaoNsjysHq0y1253Ai94Y= +github.com/fluxcd/cli-utils v0.36.0-flux.15/go.mod h1:AqRUmWIfNE7cdL6NWSGF0bAlypGs+9x5UQ2qOtlEzv4= github.com/fluxcd/gitkit v0.6.0 h1:iNg5LTx6ePo+Pl0ZwqHTAkhbUHxGVSY3YCxCdw7VIFg= github.com/fluxcd/gitkit v0.6.0/go.mod h1:svOHuKi0fO9HoawdK4HfHAJJseZDHHjk7I3ihnCIqNo= -github.com/fluxcd/pkg/apis/acl v0.6.0 h1:rllf5uQLzTow81ZCslkQ6LPpDNqVQr6/fWaNksdUEtc= -github.com/fluxcd/pkg/apis/acl v0.6.0/go.mod h1:IVDZx3MAoDWjlLrJHMF9Z27huFuXAEQlnbWw0M6EcTs= -github.com/fluxcd/pkg/apis/event v0.16.0 h1:ffKc/3erowPnh72lFszz7sPQhLZ7bhqNrq+pu1Pb+JE= -github.com/fluxcd/pkg/apis/event v0.16.0/go.mod h1:D/QQi5lHT9/Ur3OMFLJO71D4KDQHbJ5s8dQV3h1ZAT0= -github.com/fluxcd/pkg/apis/meta v1.10.0 h1:rqbAuyl5ug7A5jjRf/rNwBXmNl6tJ9wG2iIsriwnQUk= -github.com/fluxcd/pkg/apis/meta v1.10.0/go.mod h1:n7NstXHDaleAUMajcXTVkhz0MYkvEXy1C/eLI/t1xoI= -github.com/fluxcd/pkg/auth v0.9.0 h1:9DBVlWvRPrW7xBSprvg49CcMh/keo4zPmC/JBEtiokM= -github.com/fluxcd/pkg/auth v0.9.0/go.mod h1:QXbeudfOpqox3sQtLwbknmNZ4mzLuoIN/dbgKgaC4+4= -github.com/fluxcd/pkg/cache v0.7.0 h1:6TTWbxCyAxErIAT338KrLTy96ds+vSDw4sEyypSISfs= -github.com/fluxcd/pkg/cache v0.7.0/go.mod h1:EHpyMSXf/ECKIoKEQmNCOesH2wfAdpmXR/ZXD6VwWRg= -github.com/fluxcd/pkg/git v0.25.0 h1:AZKYKalzbbOXbnLvOZ1FbV3pl9qYCbstGjEXuiQQsKo= -github.com/fluxcd/pkg/git v0.25.0/go.mod h1:xCs0kr/Z0LvZfocU3LxLLLKXsdSMoapFncc43snrc2c= -github.com/fluxcd/pkg/git/gogit v0.25.0 h1:yH2PllTPWsgOvmmR+oUSzDvGNKZKid7nYUvrXW2OrNU= -github.com/fluxcd/pkg/git/gogit v0.25.0/go.mod 
h1:yrwa7d6QvIow9vJP8S7KxqGwm1O14cSOdPRaUyicCMI= -github.com/fluxcd/pkg/gittestserver v0.16.0 h1:HXbxW6F24B3qgnkNm/UKz7Wpt1kKtmOsE2bVQUPWOhk= -github.com/fluxcd/pkg/gittestserver v0.16.0/go.mod h1:sGjpkv/X1NkJs43PSjlUxKTCit84Y1YyYn4U5ywBbFo= -github.com/fluxcd/pkg/helmtestserver v0.23.0 h1:Zyror0vD4y3pblTH/ZjUkXxfEFSRL5gPNdhuhBEUTXE= -github.com/fluxcd/pkg/helmtestserver v0.23.0/go.mod h1:jNd1XbiKYB/E1vTKlaO3pb4R60EqVjMCW4E+hTjci3c= -github.com/fluxcd/pkg/lockedfile v0.5.0 h1:rzFe+eudc1BWoLnuyaWprFZr6ZaiByixjSeLNf2hrB8= -github.com/fluxcd/pkg/lockedfile v0.5.0/go.mod h1:l/d/gk60oIy48rNWoPGSnjVWjjkSDLAtaMXYEdRfaQo= -github.com/fluxcd/pkg/masktoken v0.6.0 h1:ijSqMl2L9jBR3QFcHA0FX7kxV0xgSB4PY5p//8FdVR4= -github.com/fluxcd/pkg/masktoken v0.6.0/go.mod h1:bMj45KySJ2gLeFiFaXD5nQLNFlvDqGbZolsiurZKVUU= -github.com/fluxcd/pkg/oci v0.45.0 h1:lPtxqqzDOF3TAYx+Y91nULFiLzgmE8XDHt/uZwBqx44= -github.com/fluxcd/pkg/oci v0.45.0/go.mod h1:i4kFlYDC84u6vtIE54eco/ArcLPXqTv+/Gt7ncSKmoE= -github.com/fluxcd/pkg/runtime v0.53.1 h1:S+QRSoiU+LH1sTvJLNvT1x3E5hBq/sjOsRHazA7OqTo= -github.com/fluxcd/pkg/runtime v0.53.1/go.mod h1:8vkIhS1AhkmjC98LRm5xM+CRG5KySFTXpJWk+ZdtT4I= -github.com/fluxcd/pkg/sourceignore v0.11.0 h1:xzpYmc5/t/Ck+/DkJSX3r+VbahDRIAn5kbv04fynWUo= -github.com/fluxcd/pkg/sourceignore v0.11.0/go.mod h1:ri2FvlzX8ep2iszOK5gF/riYq2TNgpVvsfJ2QY0dLWI= -github.com/fluxcd/pkg/ssh v0.17.0 h1:o+MgdM/OB8R/+KEc3W3ml/inEKZqCwT8V71dkbTAbm4= -github.com/fluxcd/pkg/ssh v0.17.0/go.mod h1:4yU099LjFWOJXZiu73rvqA70mOoSXG2yqxfPBxhnGgQ= -github.com/fluxcd/pkg/tar v0.11.0 h1:pjf/rzr6HNAPiuxT59mtba9tfBtdNiSQ/UqduG8vZ2I= -github.com/fluxcd/pkg/tar v0.11.0/go.mod h1:+kiP25NqibWMpFWgizyPEMqnMJIux7bCgEy+4pfxyI4= -github.com/fluxcd/pkg/testserver v0.10.0 h1:g5l6mX9GndovWXCTW9xCPbL6YQYgphwe4Ee6cuBmLcA= -github.com/fluxcd/pkg/testserver v0.10.0/go.mod h1:dqpWALgSYdcmPS9OXq165s4OjUexVysl++EZJ8uZVkw= -github.com/fluxcd/pkg/version v0.6.0 h1:tYRWpV7RvBOO5ahD525TiDhWXmhnvBM0RAIY1MCRe9s= -github.com/fluxcd/pkg/version v0.6.0/go.mod h1:ZCl5BkIvXmMm3C4q4fz4aMi5LQHvcXNSEaL2puXIZo8= +github.com/fluxcd/pkg/apis/acl v0.9.0 h1:wBpgsKT+jcyZEcM//OmZr9RiF8klL3ebrDp2u2ThsnA= +github.com/fluxcd/pkg/apis/acl v0.9.0/go.mod h1:TttNS+gocsGLwnvmgVi3/Yscwqrjc17+vhgYfqkfrV4= +github.com/fluxcd/pkg/apis/event v0.19.0 h1:ZJU2voontkzp5rNYA4JMOu40S4tRcrWi4Do59EnyFwg= +github.com/fluxcd/pkg/apis/event v0.19.0/go.mod h1:deuIyUb6lh+Z1Ccvwwxhm1wNM3kpSo+vF1IgRnpaZfQ= +github.com/fluxcd/pkg/apis/meta v1.21.0 h1:R+bN02chcs0HUmyVDQhqe/FHmYLjipVDMLnyYfNX850= +github.com/fluxcd/pkg/apis/meta v1.21.0/go.mod h1:XUAEUgT4gkWDAEN79E141tmL+v4SV50tVZ/Ojpc/ueg= +github.com/fluxcd/pkg/artifact v0.3.0 h1:Mxescx4HOaXJDYhdgecmZwGdnrgPFu/N6sJY9GuTpuo= +github.com/fluxcd/pkg/artifact v0.3.0/go.mod h1:CFtfSBcma+WBkIhjxleaXoCwIjccdkunLO7gv/59xe8= +github.com/fluxcd/pkg/auth v0.31.0 h1:PIwSn7Onq74cGDTocZJZ6P47FxGvbT8NIW7UKFm51rU= +github.com/fluxcd/pkg/auth v0.31.0/go.mod h1:Qxc5OKRMLBwtxO0nf2stm4ZkgzXcrvF6x6BSquiAMW8= +github.com/fluxcd/pkg/cache v0.11.0 h1:fsE8S+una21fSNw4MDXGUIf0Gf1J+pqa4RbsVKf2aTI= 
+github.com/fluxcd/pkg/cache v0.11.0/go.mod h1:2RTIU6PsJniHmfnllQWFEo7fa5V8KQlnMgn4o0sme40= +github.com/fluxcd/pkg/git v0.36.0 h1:oakFKxTX5yiLcFzCS1SaV+mMXaODaF1Ic6/oCLfIe7I= +github.com/fluxcd/pkg/git v0.36.0/go.mod h1:4TgfjcoM3B2sGsO5VbfBSwJQYzNCONGihcTOW8P3Jxw= +github.com/fluxcd/pkg/git/gogit v0.40.0 h1:VCsHC1440jMk1wAGWCwkgU2nDUBOPeYbCk6/OtvbY7Y= +github.com/fluxcd/pkg/git/gogit v0.40.0/go.mod h1:nQVyfa+rYSeVQiwVH5f/C4o1sf2MtMFjMlt3VSkC+P0= +github.com/fluxcd/pkg/gittestserver v0.20.0 h1:xhzLV89mta23ZvTK0cpDCR6ni6vp5Di+9b4v3YBziMQ= +github.com/fluxcd/pkg/gittestserver v0.20.0/go.mod h1:vGmM9eDJk56gx+osTcSHeScefnAaL4czR+rsNsvh0nw= +github.com/fluxcd/pkg/helmtestserver v0.30.0 h1:gEJ6kHei8/SB8J/YemeWaypCxRtfmoejqMxtEOlZRgI= +github.com/fluxcd/pkg/helmtestserver v0.30.0/go.mod h1:xXOkfz7/4z8fz9GJYrYVB9we7bvtmdKKedBeGPHVlhs= +github.com/fluxcd/pkg/http/transport v0.7.0 h1:LbA0qzh1lT6GncWLkN/BjbSMrN8bdFtaa2TqxiIdyzs= +github.com/fluxcd/pkg/http/transport v0.7.0/go.mod h1:G3ptGZKlY0PJZsvWCwzV9vKQ90yfP/mKT2/ZdAud9LE= +github.com/fluxcd/pkg/lockedfile v0.7.0 h1:tmzW2GeMGuJMiCcVloXVd1vKZ92anm9WGkRgOBpWfRk= +github.com/fluxcd/pkg/lockedfile v0.7.0/go.mod h1:AzCV/h1N3hi/KtUDUCUgS8hl1+a1y+I6pmRo25dxdK0= +github.com/fluxcd/pkg/masktoken v0.8.0 h1:Dm5xIVNbg0s6zNttjDvimaG38bKsXwxBVo5b+D7ThVU= +github.com/fluxcd/pkg/masktoken v0.8.0/go.mod h1:Gc73ALOqIe+5Gj2V3JggMNiYcBiZ9bNNDYBE9R5XTTg= +github.com/fluxcd/pkg/oci v0.56.0 h1:t/jnHpizC+j7Gugw8y14HdfHnhLIgmxR3yNdArghUrM= +github.com/fluxcd/pkg/oci v0.56.0/go.mod h1:WZxMYYWfugc4rtnq2zHUIHxH0+e6IRhP9EDq+mW/Z2w= +github.com/fluxcd/pkg/runtime v0.84.0 h1:3M+egIQwQU9YYjKQkczyawG+9RUOkGtkDMQlePnEeTM= +github.com/fluxcd/pkg/runtime v0.84.0/go.mod h1:Wt9mUzQgMPQMu2D/wKl5pG4zh5vu/tfF5wq9pPobxOQ= +github.com/fluxcd/pkg/sourceignore v0.14.0 h1:ZiZzbXtXb/Qp7I7JCStsxOlX8ri8rWwCvmvIrJ0UzQQ= +github.com/fluxcd/pkg/sourceignore v0.14.0/go.mod h1:E3zKvyTyB+oQKqm/2I/jS6Rrt3B7fNuig/4bY2vi3bg= +github.com/fluxcd/pkg/ssh v0.21.0 h1:ZmyF0n9je0cTTkOpvFVgIhmdx9qtswnVE60TK4IzJh0= +github.com/fluxcd/pkg/ssh v0.21.0/go.mod h1:nX+gvJOmjf0E7lxq5mKKzDIdPEL2jOUQZbkBMS+mDtk= +github.com/fluxcd/pkg/tar v0.14.0 h1:9Gku8FIvPt2bixKldZnzXJ/t+7SloxePlzyVGOK8GVQ= +github.com/fluxcd/pkg/tar v0.14.0/go.mod h1:+rOWYk93qLEJ8WwmkvJOkB8i0dna1mrwJFybE8i9Udo= +github.com/fluxcd/pkg/testserver v0.13.0 h1:xEpBcEYtD7bwvZ+i0ZmChxKkDo/wfQEV3xmnzVybSSg= +github.com/fluxcd/pkg/testserver v0.13.0/go.mod h1:akRYv3FLQUsme15na9ihECRG6hBuqni4XEY9W8kzs8E= +github.com/fluxcd/pkg/version v0.10.0 h1:WETlCRbfbocsDItkCCeh/4x4zQkZ5i/lUe7P7VaQBrI= +github.com/fluxcd/pkg/version v0.10.0/go.mod h1:dgmjEq4ykvBnqK1oVXM+hcXx3kAY/b4uZDYUn8XnHjk= github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= @@ -411,10 +419,10 @@ github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= -github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk= @@ -429,34 +437,34 @@ github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UN github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.13.2 h1:7O7xvsK7K+rZPKW6AQR1YyNhfywkv7B8/FsP3ki6Zv0= -github.com/go-git/go-git/v5 v5.13.2/go.mod h1:hWdW5P4YZRjmpGHwRH2v3zkWcNl6HeXaXQEMGb3NJ9A= +github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM= +github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= -github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= -github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= -github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY= +github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= +github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= github.com/go-kit/kit 
v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-ldap/ldap/v3 v3.4.10 h1:ot/iwPOhfpNVgB1o+AVXljizWZ9JTp7YF5oeyONmcJU= github.com/go-ldap/ldap/v3 v3.4.10/go.mod h1:JXh4Uxgi40P6E9rdsYqpUtbW46D9UTjJ9QSwGRznplY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= -github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= -github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= +github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= @@ -467,16 +475,17 @@ github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9Z github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= 
github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= -github.com/go-piv/piv-go v1.11.0 h1:5vAaCdRTFSIW4PeqMbnsDlUZ7odMYWnHBDGdmtU/Zhg= -github.com/go-piv/piv-go v1.11.0/go.mod h1:NZ2zmjVkfFaL/CF8cVQ/pXdXtuj110zEKGdJM6fJZZM= +github.com/go-piv/piv-go/v2 v2.3.0 h1:kKkrYlgLQTMPA6BiSL25A7/x4CEh2YCG7rtb/aTkx+g= +github.com/go-piv/piv-go/v2 v2.3.0/go.mod h1:ShZi74nnrWNQEdWzRUd/3cSig3uNOcEZp+EWl0oewnI= github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg= -github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU= +github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= @@ -484,8 +493,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= -github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= @@ -501,12 +510,11 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= -github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= -github.com/golang-jwt/jwt/v5 v5.2.1/go.mod 
h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -519,9 +527,7 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= @@ -530,32 +536,28 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/certificate-transparency-go v1.2.1 h1:4iW/NwzqOqYEEoCBEFP+jPbBXbLqMpq3CifMyOnDUME= -github.com/google/certificate-transparency-go v1.2.1/go.mod h1:bvn/ytAccv+I6+DGkqpvSsEdiVGramgaSC6RD3tEmeE= -github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= -github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= +github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= +github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= -github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= -github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20241111191718-6bce25ecf029 h1:0G7T22yXy+FqumvxcEg48EU4llskcDeQ2eM3vaTr64c= -github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20241111191718-6bce25ecf029/go.mod h1:Xxhh5HFmICiLl0vmmfdsvuWPFITh3DqQf3UQqU2I6V8= -github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa h1:+MG+Q2Q7mtW6kCIbUPZ9ZMrj7xOWDKI1hhy1qp0ygI0= -github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20230516205744-dbecb1de8cfa/go.mod h1:KdL98/Va8Dy1irB6lTxIRIQ7bQj4lbrlvqUzKEQ+ZBU= -github.com/google/go-github/v55 v55.0.0 h1:4pp/1tNMB9X/LuAhs5i0KQAE40NmiR/y6prLNb9x9cg= -github.com/google/go-github/v55 v55.0.0/go.mod h1:JLahOTA1DnXzhxEymmFF5PP2tSS9JVNj68mSZNDwskA= -github.com/google/go-github/v69 v69.2.0 h1:wR+Wi/fN2zdUx9YxSmYE0ktiX9IAR/BeePzeaUUbEHE= -github.com/google/go-github/v69 v69.2.0/go.mod h1:xne4jymxLR6Uj9b7J7PyTpkMYstEMMwGZa0Aehh1azM= +github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU= +github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y= +github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20250613215107-59a4b8593039 h1:1d9SJvpHXjFuYBHAS5576memil93kLpgBZ5OjdtvW4I= +github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20250613215107-59a4b8593039/go.mod h1:AlUTqI/YtH9ckkhLo4ClTAccEOZz8EaLVxqrfv56OFg= +github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20250225234217-098045d5e61f h1:GJRzEBoJv/A/E7JbTekq1Q0jFtAfY7TIxUFAK89Mmic= +github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20250225234217-098045d5e61f/go.mod h1:ZT74/OE6eosKneM9/LQItNxIMBV6CI5S46EXAnvkTBI= +github.com/google/go-github/v72 v72.0.0 h1:FcIO37BLoVPBO9igQQ6tStsv2asG4IPcYFi655PPvBM= +github.com/google/go-github/v72 v72.0.0/go.mod h1:WWtw8GMRiL62mvIquf1kO3onRHeWWKmK01qdCY8c5fg= 
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -564,24 +566,21 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20250128161936-077ca0a936bf h1:BvBLUD2hkvLI3dJTJMiopAq8/wp43AAZKTP7qdpptbU= -github.com/google/pprof v0.0.0-20250128161936-077ca0a936bf/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= -github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= -github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 h1:xhMrHhTJ6zxu3gA4enFM9MLn9AY7613teCdFnlUVbSQ= +github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w= -github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM= -github.com/google/trillian v1.6.0 h1:jMBeDBIkINFvS2n6oV5maDqfRlxREAc6CW9QYWQ0qT4= -github.com/google/trillian v1.6.0/go.mod h1:Yu3nIMITzNhhMJEHjAtp6xKiu+H/iHu2Oq5FjV2mCWI= +github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js= +github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= -github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= -github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o= -github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= +github.com/googleapis/enterprise-certificate-proxy 
v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= @@ -590,14 +589,16 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= -github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -607,16 +608,16 @@ github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB1 github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.7 
h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= -github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= +github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-sockaddr v1.0.5 h1:dvk7TIXCZpmfOlM+9mlcrWmWjw/wlKT+VDq2wMvfPJU= -github.com/hashicorp/go-sockaddr v1.0.5/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -626,20 +627,28 @@ github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/vault/api v1.14.0 h1:Ah3CFLixD5jmjusOgm8grfN9M0d+Y8fVR2SW0K6pJLU= -github.com/hashicorp/vault/api v1.14.0/go.mod h1:pV9YLxBGSz+cItFDd8Ii4G17waWOQ32zVjMWHe/cOqk= +github.com/hashicorp/vault/api v1.16.0 h1:nbEYGJiAPGzT9U4oWgaaB0g+Rj8E59QuHKyA5LhwQN4= +github.com/hashicorp/vault/api v1.16.0/go.mod h1:KhuUhzOD8lDSk29AtzNjgAu2kxRA9jL9NAbkFlqvkBA= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle 
v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/in-toto/attestation v1.1.0 h1:oRWzfmZPDSctChD0VaQV7MJrywKOzyNrtpENQFq//2Q= -github.com/in-toto/attestation v1.1.0/go.mod h1:DB59ytd3z7cIHgXxwpSX2SABrU6WJUKg/grpdgHVgVs= +github.com/in-toto/attestation v1.1.1 h1:QD3d+oATQ0dFsWoNh5oT0udQ3tUrOsZZ0Fc3tSgWbzI= +github.com/in-toto/attestation v1.1.1/go.mod h1:Dcq1zVwA2V7Qin8I7rgOi+i837wEf/mOZwRm047Sjys= github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= +github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= @@ -656,12 +665,10 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= -github.com/jellydator/ttlcache/v3 v3.2.0 h1:6lqVJ8X3ZaUwvzENqPAobDsXNExfUJd61u++uW8a3LE= -github.com/jellydator/ttlcache/v3 v3.2.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= +github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= 
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= @@ -677,16 +684,16 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= -github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= +github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= +github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= -github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -708,19 +715,17 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= 
-github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= +github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= @@ -732,39 +737,34 @@ github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.87 h1:nkr9x0u53PespfxfUqxP3UYWiE2a41gaofgNnC4Y8WQ= -github.com/minio/minio-go/v7 v7.0.87/go.mod h1:33+O8h0tO7pCeCWwBVa07RhVVfB/3vS4kEX7rwYKmIg= +github.com/minio/minio-go/v7 v7.0.94 h1:1ZoksIKPyaSt64AVOyaQvhDOgVC3MfZsWM6mZXRUGtM= +github.com/minio/minio-go/v7 v7.0.94/go.mod h1:71t2CqDt3ThzESgZUlU1rBN54mksGGlkLcFgguDnnAc= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= -github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= -github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= -github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= -github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= -github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 
v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/mozillazg/docker-credential-acr-helper v0.4.0 h1:Uoh3Z9CcpEDnLiozDx+D7oDgRq7X+R296vAqAumnOcw= @@ -775,10 +775,10 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/notaryproject/notation-core-go v1.2.0 h1:WElMG9X0YXJhBd0A4VOxLNalTLrTjvqtIAj7JHr5X08= -github.com/notaryproject/notation-core-go v1.2.0/go.mod h1:+y3L1dOs2/ZwJIU5Imo7BBvZ/M3CFjXkydGGdK09EtA= -github.com/notaryproject/notation-go v1.3.0 h1:jn/hAVG5FyKHqTjcU+PCpVnyI4pwINrSk657Hx58j2Y= -github.com/notaryproject/notation-go v1.3.0/go.mod h1:ig6lhOPvLW4jrp6ZfaW+B3uNGKbcNW9pgIByvz/s31w= +github.com/notaryproject/notation-core-go v1.3.0 h1:mWJaw1QBpBxpjLSiKOjzbZvB+xh2Abzk14FHWQ+9Kfs= +github.com/notaryproject/notation-core-go v1.3.0/go.mod h1:hzvEOit5lXfNATGNBT8UQRx2J6Fiw/dq/78TQL8aE64= +github.com/notaryproject/notation-go v1.3.2 h1:4223iLXOHhEV7ZPzIUJEwwMkhlgzoYFCsMJvSH1Chb8= +github.com/notaryproject/notation-go v1.3.2/go.mod h1:/1kuq5WuLF6Gaer5re0Z6HlkQRlKYO4EbWWT/L7J1Uw= github.com/notaryproject/notation-plugin-framework-go v1.0.0 h1:6Qzr7DGXoCgXEQN+1gTZWuJAZvxh3p8Lryjn5FaLzi4= github.com/notaryproject/notation-plugin-framework-go v1.0.0/go.mod h1:RqWSrTOtEASCrGOEffq0n8pSg2KOgKYiWqFWczRSics= github.com/notaryproject/tspclient-go v1.0.0 h1:AwQ4x0gX8IHnyiZB1tggpn5NFqHpTEm1SDX8YNv4Dg4= @@ -799,42 +799,44 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= -github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw= +github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.36.2 
h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= -github.com/open-policy-agent/opa v0.68.0 h1:Jl3U2vXRjwk7JrHmS19U3HZO5qxQRinQbJ2eCJYSqJQ= -github.com/open-policy-agent/opa v0.68.0/go.mod h1:5E5SvaPwTpwt2WM177I9Z3eT7qUpmOGjk1ZdHs+TZ4w= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/open-policy-agent/opa v1.5.1 h1:LTxxBJusMVjfs67W4FoRcnMfXADIGFMzpqnfk6D08Cg= +github.com/open-policy-agent/opa v1.5.1/go.mod h1:bYbS7u+uhTI+cxHQIpzvr5hxX0hV7urWtY+38ZtjMgk= github.com/opencontainers/go-digest v1.0.1-0.20220411205349-bde1400a84be h1:f2PlhC9pm5sqpBZFvnAoKj+KzXRzbjFMA+TqXfJdgho= github.com/opencontainers/go-digest v1.0.1-0.20220411205349-bde1400a84be/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/go-digest/blake3 v0.0.0-20240426182413-22b78e47854a h1:xwooQrLddjfeKhucuLS4ElD3TtuuRwF8QWC9eHrnbxY= -github.com/opencontainers/go-digest/blake3 v0.0.0-20240426182413-22b78e47854a/go.mod h1:kqQaIc6bZstKgnGpL7GD5dWoLKbA6mH1Y9ULjGImBnM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/go-digest/blake3 v0.0.0-20250813155314-89707e38ad1a h1:IAncDmJeD90l6+YR1Gf6r0HrmnRmOatzPfUpMS80ZTI= +github.com/opencontainers/go-digest/blake3 v0.0.0-20250813155314-89707e38ad1a/go.mod h1:kqQaIc6bZstKgnGpL7GD5dWoLKbA6mH1Y9ULjGImBnM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runc v1.2.4 h1:yWFgLkghp71D76Fa0l349yAl5g4Gse7DPYNlvkQ9Eiw= github.com/opencontainers/runc v1.2.4/go.mod h1:nSxcWUydXrsBZVYNSkTjoQ/N6rcyTtn+1SD5D4+kRIM= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/ory/dockertest/v3 v3.11.0 h1:OiHcxKAvSDUwsEVh2BjxQQc/5EHz9n0va9awCtNGuyA= -github.com/ory/dockertest/v3 v3.11.0/go.mod h1:VIPxS1gwT9NpPOrfD3rACs8Y9Z7yhzO4SB194iUDnUI= +github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw= +github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE= github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml/v2 v2.2.2 
h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= -github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/pjbgf/sha1cd v0.4.0 h1:NXzbL1RvjTUi6kgYZCX3fPwwl27Q1LJndxtUDVfJGRY= +github.com/pjbgf/sha1cd v0.4.0/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -851,24 +853,26 @@ github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjz github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA= -github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod 
h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/otlptranslator v0.0.2 h1:+1CdeLVrRQ6Psmhnobldo0kTp96Rj80DRXRd5OSnMEQ= +github.com/prometheus/otlptranslator v0.0.2/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf h1:014O62zIzQwvoD7Ekj3ePDF5bv9Xxy0w6AZk0qYbjUk= -github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/protocolbuffers/txtpbfmt v0.0.0-20241112170944-20d2c9ebc01d h1:HWfigq7lB31IeJL8iy7jkUmU/PG1Sr8jVGhS749dbUA= +github.com/protocolbuffers/txtpbfmt v0.0.0-20241112170944-20d2c9ebc01d/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= @@ -876,25 +880,25 @@ github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJu github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= github.com/redis/go-redis/v9 v9.0.5/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= -github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= -github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= +github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI= +github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= 
-github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= -github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= -github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmiUq4= -github.com/rubenv/sql-migrate v1.7.1/go.mod h1:Ob2Psprc0/3ggbM6wCzyYVFFuc6FyZrb2AS+ezLDFb4= +github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= +github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= +github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= @@ -909,28 +913,28 @@ github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/sigstore/cosign/v2 v2.4.1 
h1:b8UXEfJFks3hmTwyxrRNrn6racpmccUycBHxDMkEPvU= -github.com/sigstore/cosign/v2 v2.4.1/go.mod h1:GvzjBeUKigI+XYnsoVQDmMAsMMc6engxztRSuxE+x9I= -github.com/sigstore/fulcio v1.6.3 h1:Mvm/bP6ELHgazqZehL8TANS1maAkRoM23CRAdkM4xQI= -github.com/sigstore/fulcio v1.6.3/go.mod h1:5SDgLn7BOUVLKe1DwOEX3wkWFu5qEmhUlWm+SFf0GH8= -github.com/sigstore/protobuf-specs v0.4.0 h1:yoZbdh0kZYKOSiVbYyA8J3f2wLh5aUk2SQB7LgAfIdU= -github.com/sigstore/protobuf-specs v0.4.0/go.mod h1:FKW5NYhnnFQ/Vb9RKtQk91iYd0MKJ9AxyqInEwU6+OI= -github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= -github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= -github.com/sigstore/sigstore v1.8.15 h1:9HHnZmxjPQSTPXTCZc25HDxxSTWwsGMh/ZhWZZ39maU= -github.com/sigstore/sigstore v1.8.15/go.mod h1:+Wa5mrG6A+Gss516YC9owy10q3IazqIRe0y1EoQRHHM= -github.com/sigstore/sigstore-go v0.6.1 h1:tGkkv1oDIER+QYU5MrjqlttQOVDWfSkmYwMqkJhB/cg= -github.com/sigstore/sigstore-go v0.6.1/go.mod h1:Xe5GHmUeACRFbomUWzVkf/xYCn8xVifb9DgqJrV2dIw= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8 h1:2zHmUvaYCwV6LVeTo+OAkTm8ykOGzA9uFlAjwDPAUWM= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8/go.mod h1:OEhheBplZinUsm7W9BupafztVZV3ldkAxEHbpAeC0Pk= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8 h1:RKk4Z+qMaLORUdT7zntwMqKiYAej1VQlCswg0S7xNSY= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8/go.mod h1:dMJdlBWKHMu2xf0wIKpbo7+QfG+RzVkBB3nHP8EMM5o= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8 h1:89Xtxj8oqZt3UlSpCP4wApFvnQ2Z/dgowW5QOVhQigI= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8/go.mod h1:Wa4xn/H3pU/yW/6tHiMXTpObBtBSGC5q29KYFEPKN6o= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8 h1:Zte3Oogkd8m+nu2oK3yHtGmN++TZWh2Lm6q2iSprT1M= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8/go.mod h1:j00crVw6ki4/WViXflw0zWgNALrAzZT+GbIK8v7Xlz4= -github.com/sigstore/timestamp-authority v1.2.2 h1:X4qyutnCQqJ0apMewFyx+3t7Tws00JQ/JonBiu3QvLE= -github.com/sigstore/timestamp-authority v1.2.2/go.mod h1:nEah4Eq4wpliDjlY342rXclGSO7Kb9hoRrl9tqLW13A= +github.com/sigstore/cosign/v2 v2.5.2 h1:i5Dw7M7W9OcWgyiknJB8vNx/07KweninBDxRoHPxqHE= +github.com/sigstore/cosign/v2 v2.5.2/go.mod h1:CYlcgkPQJZ5pvWlbl7mOfO/Q1S1N7r4tpdYCtFwhXco= +github.com/sigstore/fulcio v1.7.1 h1:RcoW20Nz49IGeZyu3y9QYhyyV3ZKQ85T+FXPKkvE+aQ= +github.com/sigstore/fulcio v1.7.1/go.mod h1:7lYY+hsd8Dt+IvKQRC+KEhWpCZ/GlmNvwIa5JhypMS8= +github.com/sigstore/protobuf-specs v0.4.3 h1:kRgJ+ciznipH9xhrkAbAEHuuxD3GhYnGC873gZpjJT4= +github.com/sigstore/protobuf-specs v0.4.3/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.3.10 h1:/mSvRo4MZ/59ECIlARhyykAlQlkmeAQpvBPlmJtZOCU= +github.com/sigstore/rekor v1.3.10/go.mod h1:JvryKJ40O0XA48MdzYUPu0y4fyvqt0C4iSY7ri9iu3A= +github.com/sigstore/sigstore v1.9.5 h1:Wm1LT9yF4LhQdEMy5A2JeGRHTrAWGjT3ubE5JUSrGVU= +github.com/sigstore/sigstore v1.9.5/go.mod h1:VtxgvGqCmEZN9X2zhFSOkfXxvKUjpy8RpUW39oCtoII= +github.com/sigstore/sigstore-go v1.0.0 
h1:4N07S2zLxf09nTRwaPKyAxbKzpM8WJYUS8lWWaYxneU= +github.com/sigstore/sigstore-go v1.0.0/go.mod h1:UYsZ/XHE4eltv1o1Lu+n6poW1Z5to3f0+emvfXNxIN8= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.9.5 h1:qp2VFyKuFQvTGmZwk5Q7m5nE4NwnF9tHwkyz0gtWAck= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.9.5/go.mod h1:DKlQjjr+GsWljEYPycI0Sf8URLCk4EbGA9qYjF47j4g= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.9.5 h1:CRZcdYn5AOptStsLRAAACudAVmb1qUbhMlzrvm7ju3o= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.9.5/go.mod h1:b9rFfITq2fp1M3oJmq6lFFhSrAz5vOEJH1qzbMsZWN4= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.9.5 h1:7U0GsO0UGG1PdtgS6wBkRC0sMgq7BRVaFlPRwN4m1Qg= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.9.5/go.mod h1:/2qrI0nnCy/DTIPOMFaZlFnNPWEn5UeS70P37XEM88o= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.9.5 h1:S2ukEfN1orLKw2wEQIUHDDlzk0YcylhcheeZ5TGk8LI= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.9.5/go.mod h1:m7sQxVJmDa+rsmS1m6biQxaLX83pzNS7ThUEyjOqkCU= +github.com/sigstore/timestamp-authority v1.2.8 h1:BEV3fkphwU4zBp3allFAhCqQb99HkiyCXB853RIwuEE= +github.com/sigstore/timestamp-authority v1.2.8/go.mod h1:G2/0hAZmLPnevEwT1S9IvtNHUm9Ktzvso6xuRhl94ZY= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -939,26 +943,24 @@ github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnB github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= -github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY= -github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod 
h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= -github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= -github.com/spiffe/go-spiffe/v2 v2.3.0 h1:g2jYNb/PDMB8I7mBGL2Zuq/Ur6hUhoroxGQFyD6tTj8= -github.com/spiffe/go-spiffe/v2 v2.3.0/go.mod h1:Oxsaio7DBgSNqhAO9i/9tLClaVlfRok7zvJnTV8ZyIY= +github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= +github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -977,22 +979,30 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod 
h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= -github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM= +github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= -github.com/theupdateframework/go-tuf/v2 v2.0.1 h1:11p9tXpq10KQEujxjcIjDSivMKCMLguls7erXHZnxJQ= -github.com/theupdateframework/go-tuf/v2 v2.0.1/go.mod h1:baB22nBHeHBCeuGZcIlctNq4P61PcOdyARlplg5xmLA= +github.com/theupdateframework/go-tuf/v2 v2.1.1 h1:OWcoHItwsGO+7m0wLa7FDWPR4oB1cj0zOr1kosE4G+I= +github.com/theupdateframework/go-tuf/v2 v2.1.1/go.mod h1:V675cQGhZONR0OGQ8r1feO0uwtsTBYPDWHzAAPn5rjE= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= +github.com/tink-crypto/tink-go/v2 v2.4.0 h1:8VPZeZI4EeZ8P/vB6SIkhlStrJfivTJn+cQ4dtyHNh0= +github.com/tink-crypto/tink-go/v2 v2.4.0/go.mod h1:l//evrF2Y3MjdbpNDNGnKgCpo5zSmvUvnQ4MU+yE2sw= +github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww= +github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= @@ -1000,14 +1010,14 @@ github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho= github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE= github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= -github.com/vbatts/tar-split v0.11.6 
h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= -github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vektah/gqlparser/v2 v2.5.26 h1:REqqFkO8+SOEgZHR/eHScjjVjGS8Nk3RMO/juiTobN4= +github.com/vektah/gqlparser/v2 v2.5.26/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo= github.com/veraison/go-cose v1.3.0 h1:2/H5w8kdSpQJyVtIhx8gmwPJ2uSz1PkyWFx0idbd7rk= github.com/veraison/go-cose v1.3.0/go.mod h1:df09OV91aHoQWLmy1KsDdYiagtXgyAwAl8vFeFn1gMc= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xanzy/go-gitlab v0.109.0 h1:RcRme5w8VpLXTSTTMZdVoQWY37qTJWg+gwdQl4aAttE= -github.com/xanzy/go-gitlab v0.109.0/go.mod h1:wKNKh3GkYDMOsGmnfuX+ITCmDuSDWFO0G+C4AygL9RY= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -1041,72 +1051,80 @@ github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= -github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs= -github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= +gitlab.com/gitlab-org/api/client-go v0.130.1 h1:1xF5C5Zq3sFeNg3PzS2z63oqrxifne3n/OnbI7nptRc= +gitlab.com/gitlab-org/api/client-go v0.130.1/go.mod h1:ZhSxLAWadqP6J9lMh40IAZOlOxBLPRh7yFOXR/bMJWM= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= -go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= -go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ= -go.opentelemetry.io/contrib/detectors/gcp 
v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU= -go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= -go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod 
h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= -go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= -go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= -go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= -go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= -go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= -go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= -go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= -go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= -go.step.sm/crypto v0.51.2 h1:5EiCGIMg7IvQTGmJrwRosbXeprtT80OhoS/PJarg60o= -go.step.sm/crypto v0.51.2/go.mod h1:QK7czLjN2k+uqVp5CHXxJbhc70kVRSP+0CQF3zsR5M0= +go.opentelemetry.io/contrib/bridges/prometheus v0.63.0 h1:/Rij/t18Y7rUayNg7Id6rPrEnHgorxYabm2E6wUdPP4= +go.opentelemetry.io/contrib/bridges/prometheus v0.63.0/go.mod h1:AdyDPn6pkbkt2w01n3BubRVk7xAsCRq1Yg1mpfyA/0E= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= +go.opentelemetry.io/contrib/exporters/autoexport v0.63.0 h1:NLnZybb9KkfMXPwZhd5diBYJoVxiO9Qa06dacEA7ySY= +go.opentelemetry.io/contrib/exporters/autoexport v0.63.0/go.mod h1:OvRg7gm5WRSCtxzGSsrFHbDLToYlStHNZQ+iPNIyD6g= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0 h1:OMqPldHt79PqWKOMYIAQs3CxAi7RLgPxwfFSwr4ZxtM= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.14.0/go.mod h1:1biG4qiqTxKiUCtoWDPpL3fB3KxVwCiGw81j3nKMuHE= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0 h1:QQqYw3lkrzwVsoEX0w//EhH/TCnpRdEenKBOOEIMjWc= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.14.0/go.mod h1:gSVQcr17jk2ig4jqJ2DX30IdWH251JcNAecvrqTxH1s= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw= 
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 h1:Oe2z/BCg5q7k4iXC3cqJxKYg0ieRiOqF0cecFYdPTwk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0/go.mod h1:ZQM5lAJpOsKnYagGg/zV2krVqTtaVdYdDkhMoX6Oalg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0 h1:cGtQxGvZbnrWdC2GyjZi0PDKVSLWP/Jocix3QWfXtbo= +go.opentelemetry.io/otel/exporters/prometheus v0.60.0/go.mod h1:hkd1EekxNo69PTV4OWFGZcKQiIqg0RfuWExcPKFvepk= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0 h1:B/g+qde6Mkzxbry5ZZag0l7QrQBCtVm7lVjaLgmpje8= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.14.0/go.mod h1:mOJK8eMmgW6ocDJn6Bn11CcZ05gi3P8GylBXEkZtbgA= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 h1:wm/Q0GAAykXv83wzcKzGGqAnnfLFyFe7RslekZuv+VI= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0/go.mod h1:ra3Pa40+oKjvYh+ZD3EdxFZZB0xdMfuileHAm4nNN7w= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE= +go.opentelemetry.io/otel/log v0.14.0 h1:2rzJ+pOAZ8qmZ3DDHg73NEKzSZkhkGIua9gXtxNGgrM= +go.opentelemetry.io/otel/log v0.14.0/go.mod h1:5jRG92fEAgx0SU/vFPxmJvhIuDU9E1SUnEQrMlJpOno= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/log v0.14.0 h1:JU/U3O7N6fsAXj0+CXz21Czg532dW2V4gG1HE/e8Zrg= +go.opentelemetry.io/otel/sdk/log v0.14.0/go.mod h1:imQvII+0ZylXfKU7/wtOND8Hn4OpT3YUoIgqJVksUkM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0 h1:Ijbtz+JKXl8T2MngiwqBlPaHqc4YCaP/i13Qrow6gAM= +go.opentelemetry.io/otel/sdk/log/logtest v0.14.0/go.mod h1:dCU8aEL6q+L9cYTqcVOk8rM9Tp8WdnHOPLiBgp0SGOA= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.8.0 h1:fRAZQDcAFHySxpJ1TwlA1cJ4tvcrw7nXl9xWWC8N5CE= +go.opentelemetry.io/proto/otlp v1.8.0/go.mod h1:tIeYOeNBU4cvmPqpaji1P+KbB4Oloai8wN4rWzRrFF0= +go.step.sm/crypto v0.66.0 h1:9TW6BEguOtcS9NIjja9bDQ+j8OjhenU/F6lJfHjbXNU= +go.step.sm/crypto v0.66.0/go.mod h1:anqGyvO/Px05D1mznHq4/a9wwP1I1DmMZvk+TWX5Dzo= +go.uber.org/automaxprocs v1.6.0 
h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1115,7 +1133,6 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= @@ -1123,14 +1140,15 @@ golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45 golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.34.0 h1:+/C6tk6rf/+t5DhUketUbD1aNGqiSX3j15Z6xuIDlBA= -golang.org/x/crypto v0.34.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= +golang.org/x/exp 
v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1142,8 +1160,8 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= -golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1159,7 +1177,6 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1176,11 +1193,11 @@ golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= -golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1196,8 +1213,8 @@ golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1224,7 +1241,6 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1233,11 +1249,12 @@ golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1247,11 +1264,12 @@ golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.15.0/go.mod 
h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -1264,10 +1282,10 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= -golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1284,51 +1302,45 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= -golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= -gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod 
h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/api v0.211.0 h1:IUpLjq09jxBSV1lACO33CGY3jsRcbctfGzhj+ZSE/Bg= -google.golang.org/api v0.211.0/go.mod h1:XOloB4MXFH4UTlQSGuNUxw0UT74qdENK8d6JNsXKLi0= +gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= +gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.248.0 h1:hUotakSkcwGdYUqzCRc5yGYsg4wXxpkKlW5ryVqvC1Y= +google.golang.org/api v0.248.0/go.mod h1:yAFUAF56Li7IuIQbTFoLwXTCI6XCFKueOlS7S9e4F9k= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= -google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= -google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= -google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a h1:UIpYSuWdWHSzjwcAFRLjKcPXFZVVLXGEM23W+NWqipw= 
-google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= -google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1357,53 +1369,51 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -helm.sh/helm/v3 v3.17.0 h1:DUD4AGdNVn7PSTYfxe1gmQG7s18QeWv/4jI9TubnhT0= -helm.sh/helm/v3 v3.17.0/go.mod h1:Mo7eGyKPPHlS0Ml67W8z/lbkox/gD9Xt1XpD6bxvZZA= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +helm.sh/helm/v3 v3.19.0 h1:krVyCGa8fa/wzTZgqw0DUiXuRT5BPdeqE/sQXujQ22k= +helm.sh/helm/v3 v3.19.0/go.mod h1:Lk/SfzN0w3a3C3o+TdAKrLwJ0wcZ//t1/SDXAvfgDdc= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= -k8s.io/api v0.32.1/go.mod 
h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= -k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= -k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= -k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= -k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak= -k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw= -k8s.io/cli-runtime v0.32.1 h1:19nwZPlYGJPUDbhAxDIS2/oydCikvKMHsxroKNGA2mM= -k8s.io/cli-runtime v0.32.1/go.mod h1:NJPbeadVFnV2E7B7vF+FvU09mpwYlZCu8PqjzfuOnkY= -k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= -k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= -k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= -k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w= +k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= +k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= +k8s.io/apiextensions-apiserver v0.34.0 h1:B3hiB32jV7BcyKcMU5fDaDxk882YrJ1KU+ZSkA9Qxoc= +k8s.io/apiextensions-apiserver v0.34.0/go.mod h1:hLI4GxE1BDBy9adJKxUxCEHBGZtGfIg98Q+JmTD7+g0= +k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= +k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg= +k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ= +k8s.io/cli-runtime v0.34.0 h1:N2/rUlJg6TMEBgtQ3SDRJwa8XyKUizwjlOknT1mB2Cw= +k8s.io/cli-runtime v0.34.0/go.mod h1:t/skRecS73Piv+J+FmWIQA2N2/rDjdYSQzEE67LUUs8= +k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo= +k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY= +k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8= +k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= -k8s.io/kubectl v0.32.1 h1:/btLtXLQUU1rWx8AEvX9jrb9LaI6yeezt3sFALhB8M8= -k8s.io/kubectl v0.32.1/go.mod h1:sezNuyWi1STk4ZNPVRIFfgjqMI6XMf+oCVLjZen/pFQ= -k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= -k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= -oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= -oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c= -oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg= -sigs.k8s.io/controller-runtime v0.20.1 h1:JbGMAG/X94NeM3xvjenVUaBjy6Ui4Ogd/J5ZtjZnHaE= -sigs.k8s.io/controller-runtime v0.20.1/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod 
h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs= +k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= +oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= +sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg= +sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= -sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o= -sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA= -sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY= -sigs.k8s.io/release-utils v0.8.4 h1:4QVr3UgbyY/d9p74LBhg0njSVQofUsAZqYOzVZBhdBw= -sigs.k8s.io/release-utils v0.8.4/go.mod h1:m1bHfscTemQp+z+pLCZnkXih9n0+WukIUU70n6nFnU0= -sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk= -sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= +sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= +sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= +sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/release-utils v0.11.1 h1:hzvXGpHgHJfLOJB6TRuu14bzWc3XEglHmXHJqwClSZE= +sigs.k8s.io/release-utils v0.11.1/go.mod h1:ybR2V/uQAOGxYfzYtBenSYeXWkBGNP2qnEiX77ACtpc= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index 79a769a8c..f186b9dd3 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,5 +1,5 @@ /* -Copyright 2024 The Flux authors +Copyright 2025 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/pkg/azure/blob.go b/internal/bucket/azure/blob.go similarity index 94% rename from pkg/azure/blob.go rename to internal/bucket/azure/blob.go index 24f778a85..5bf814b7d 100644 --- a/pkg/azure/blob.go +++ b/internal/bucket/azure/blob.go @@ -37,6 +37,8 @@ import ( corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" + "github.com/fluxcd/pkg/auth" + azureauth "github.com/fluxcd/pkg/auth/azure" "github.com/fluxcd/pkg/masktoken" sourcev1 "github.com/fluxcd/source-controller/api/v1" @@ -87,6 +89,7 @@ type options struct { proxyURL *url.URL withoutCredentials bool withoutRetries bool + authOpts []auth.Option } // withoutCredentials forces the BlobClient to not use any credentials. @@ -107,6 +110,13 @@ func withoutRetries() Option { } } +// WithAuth sets the auth options for workload identity authentication. +func WithAuth(authOpts ...auth.Option) Option { + return func(o *options) { + o.authOpts = authOpts + } +} + // NewClient creates a new Azure Blob storage client. // The credential config on the client is set based on the data from the // Bucket and Secret. It detects credentials in the Secret in the following @@ -130,7 +140,7 @@ func withoutRetries() Option { // // If no credentials are found, and the azidentity.ChainedTokenCredential can // not be established. A simple client without credentials is returned. -func NewClient(obj *sourcev1.Bucket, opts ...Option) (c *BlobClient, err error) { +func NewClient(ctx context.Context, obj *sourcev1.Bucket, opts ...Option) (c *BlobClient, err error) { c = &BlobClient{} var o options @@ -192,7 +202,7 @@ func NewClient(obj *sourcev1.Bucket, opts ...Option) (c *BlobClient, err error) // Compose token chain based on environment. // This functions as a replacement for azidentity.NewDefaultAzureCredential // to not shell out. - token, err = chainCredentialWithSecret(o.secret) + token, err = chainCredentialWithSecret(ctx, o.secret, o.authOpts...) if err != nil { err = fmt.Errorf("failed to create environment credential chain: %w", err) return nil, err @@ -470,7 +480,7 @@ func sasTokenFromSecret(ep string, secret *corev1.Secret) (string, error) { // - azidentity.ManagedIdentityCredential with defaults. // // If no valid token is created, it returns nil. 
-func chainCredentialWithSecret(secret *corev1.Secret) (azcore.TokenCredential, error) { +func chainCredentialWithSecret(ctx context.Context, secret *corev1.Secret, opts ...auth.Option) (azcore.TokenCredential, error) { var creds []azcore.TokenCredential credOpts := &azidentity.EnvironmentCredentialOptions{} @@ -483,28 +493,7 @@ func chainCredentialWithSecret(secret *corev1.Secret) (azcore.TokenCredential, e if token, _ := azidentity.NewEnvironmentCredential(credOpts); token != nil { creds = append(creds, token) } - if clientID := os.Getenv("AZURE_CLIENT_ID"); clientID != "" { - if file, ok := os.LookupEnv("AZURE_FEDERATED_TOKEN_FILE"); ok { - if _, ok := os.LookupEnv("AZURE_AUTHORITY_HOST"); ok { - if tenantID, ok := os.LookupEnv("AZURE_TENANT_ID"); ok { - if token, _ := azidentity.NewWorkloadIdentityCredential(&azidentity.WorkloadIdentityCredentialOptions{ - ClientID: clientID, - TenantID: tenantID, - TokenFilePath: file, - }); token != nil { - creds = append(creds, token) - } - } - } - } - - if token, _ := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{ - ID: azidentity.ClientID(clientID), - }); token != nil { - creds = append(creds, token) - } - } - if token, _ := azidentity.NewManagedIdentityCredential(nil); token != nil { + if token := azureauth.NewTokenCredential(ctx, opts...); token != nil { creds = append(creds, token) } diff --git a/pkg/azure/blob_integration_test.go b/internal/bucket/azure/blob_integration_test.go similarity index 100% rename from pkg/azure/blob_integration_test.go rename to internal/bucket/azure/blob_integration_test.go diff --git a/pkg/azure/blob_test.go b/internal/bucket/azure/blob_test.go similarity index 98% rename from pkg/azure/blob_test.go rename to internal/bucket/azure/blob_test.go index 6c77cd13d..83f17e900 100644 --- a/pkg/azure/blob_test.go +++ b/internal/bucket/azure/blob_test.go @@ -45,7 +45,7 @@ import ( func TestNewClientAndBucketExistsWithProxy(t *testing.T) { g := NewWithT(t) - proxyAddr, proxyPort := testproxy.New(t) + proxyAddr, _ := testproxy.New(t) // start mock bucket server bucketListener, bucketAddr, _ := testlistener.New(t) @@ -91,7 +91,7 @@ func TestNewClientAndBucketExistsWithProxy(t *testing.T) { { name: "with incorrect proxy", endpoint: bucketEndpoint, - proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)}, + proxyURL: &url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", 1)}, err: "connection refused", }, } @@ -106,7 +106,8 @@ func TestNewClientAndBucketExistsWithProxy(t *testing.T) { }, } - client, err := NewClient(bucket, + client, err := NewClient(t.Context(), + bucket, WithProxyURL(tt.proxyURL), withoutCredentials(), withoutRetries()) @@ -115,7 +116,7 @@ func TestNewClientAndBucketExistsWithProxy(t *testing.T) { ok, err := client.BucketExists(context.Background(), "podinfo") if tt.err != "" { - g.Expect(err.Error()).To(ContainSubstring(tt.err)) + g.Expect(err).To(MatchError(ContainSubstring(tt.err))) g.Expect(ok).To(BeFalse()) } else { g.Expect(err).ToNot(HaveOccurred()) @@ -472,7 +473,7 @@ func Test_sasTokenFromSecret(t *testing.T) { func Test_chainCredentialWithSecret(t *testing.T) { g := NewWithT(t) - got, err := chainCredentialWithSecret(nil) + got, err := chainCredentialWithSecret(t.Context(), nil) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(BeAssignableToTypeOf(&azidentity.ChainedTokenCredential{})) } diff --git a/pkg/gcp/gcp.go b/internal/bucket/gcp/gcp.go similarity index 88% rename from pkg/gcp/gcp.go rename to internal/bucket/gcp/gcp.go 
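As a minimal, illustrative sketch (not part of the patch): this is how a caller inside this repository could wire the new context-aware azure.NewClient together with the fluxcd/pkg/auth options shown above, assuming no Secret is configured. The helper name newAzureClient is hypothetical; the option and type names mirror the diff, and the internal package is only importable from within this module.

    package example

    import (
    	"context"
    	"fmt"

    	"github.com/fluxcd/pkg/auth"
    	"sigs.k8s.io/controller-runtime/pkg/client"

    	sourcev1 "github.com/fluxcd/source-controller/api/v1"
    	"github.com/fluxcd/source-controller/internal/bucket/azure"
    )

    // newAzureClient (hypothetical) builds workload identity auth options and
    // passes them to the ctx-aware azure.NewClient introduced by this change.
    func newAzureClient(ctx context.Context, c client.Client, obj *sourcev1.Bucket) (*azure.BlobClient, error) {
    	authOpts := []auth.Option{
    		auth.WithClient(c),
    		auth.WithServiceAccountNamespace(obj.GetNamespace()),
    	}
    	if obj.Spec.ServiceAccountName != "" {
    		// Object-level workload identity; requires the corresponding feature gate.
    		authOpts = append(authOpts, auth.WithServiceAccountName(obj.Spec.ServiceAccountName))
    	}
    	blobClient, err := azure.NewClient(ctx, obj, azure.WithAuth(authOpts...))
    	if err != nil {
    		return nil, fmt.Errorf("failed to create Azure Blob client: %w", err)
    	}
    	return blobClient, nil
    }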
index 936c7587a..70afe9fcd 100644 --- a/pkg/gcp/gcp.go +++ b/internal/bucket/gcp/gcp.go @@ -34,6 +34,11 @@ import ( htransport "google.golang.org/api/transport/http" corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" + + "github.com/fluxcd/pkg/auth" + gcpauth "github.com/fluxcd/pkg/auth/gcp" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) var ( @@ -69,13 +74,21 @@ func WithProxyURL(proxyURL *url.URL) Option { } } +// WithAuth sets the auth options for workload identity authentication. +func WithAuth(authOpts ...auth.Option) Option { + return func(o *options) { + o.authOpts = authOpts + } +} + type options struct { secret *corev1.Secret proxyURL *url.URL + authOpts []auth.Option // newCustomHTTPClient should create a new HTTP client for interacting with the GCS API. // This is a test-only option required for mocking the real logic, which requires either - // a valid Google Service Account Key or ADC. Both are not available in tests. + // a valid Google Service Account Key or Controller-Level Workload Identity. Both are not available in tests. // The real logic is implemented in the newHTTPClient function, which is used when // constructing the default options object. newCustomHTTPClient func(context.Context, *options) (*http.Client, error) @@ -89,7 +102,7 @@ func newOptions() *options { // NewClient creates a new GCP storage client. The Client will automatically look for the Google Application // Credential environment variable or look for the Google Application Credential file. -func NewClient(ctx context.Context, opts ...Option) (*GCSClient, error) { +func NewClient(ctx context.Context, bucket *sourcev1.Bucket, opts ...Option) (*GCSClient, error) { o := newOptions() for _, opt := range opts { opt(o) @@ -100,7 +113,10 @@ func NewClient(ctx context.Context, opts ...Option) (*GCSClient, error) { switch { case o.secret != nil && o.proxyURL == nil: clientOpts = append(clientOpts, option.WithCredentialsJSON(o.secret.Data["serviceaccount"])) - case o.proxyURL != nil: + case o.secret == nil && o.proxyURL == nil: + tokenSource := gcpauth.NewTokenSource(ctx, o.authOpts...) + clientOpts = append(clientOpts, option.WithTokenSource(tokenSource)) + default: // o.proxyURL != nil: httpClient, err := o.newCustomHTTPClient(ctx, o) if err != nil { return nil, err @@ -135,6 +151,9 @@ func newHTTPClient(ctx context.Context, o *options) (*http.Client, error) { return nil, fmt.Errorf("failed to create Google credentials from secret: %w", err) } opts = append(opts, option.WithCredentials(creds)) + } else { // Workload Identity. + tokenSource := gcpauth.NewTokenSource(ctx, o.authOpts...) + opts = append(opts, option.WithTokenSource(tokenSource)) } transport, err := htransport.NewTransport(ctx, baseTransport, opts...) @@ -160,14 +179,14 @@ func ValidateSecret(secret *corev1.Secret) error { // exists, or returns a (client) error. func (c *GCSClient) BucketExists(ctx context.Context, bucketName string) (bool, error) { _, err := c.Client.Bucket(bucketName).Attrs(ctx) - if err == gcpstorage.ErrBucketNotExist { + if err == nil { + return true, nil + } + if errors.Is(err, gcpstorage.ErrBucketNotExist) { // Not returning error to be compatible with minio's API. 
return false, nil } - if err != nil { - return false, err - } - return true, nil + return false, err } // FGetObject gets the object from the provided object storage bucket, and diff --git a/pkg/gcp/gcp_test.go b/internal/bucket/gcp/gcp_test.go similarity index 79% rename from pkg/gcp/gcp_test.go rename to internal/bucket/gcp/gcp_test.go index aa252324c..0c12a72ea 100644 --- a/pkg/gcp/gcp_test.go +++ b/internal/bucket/gcp/gcp_test.go @@ -34,13 +34,14 @@ import ( "cloud.google.com/go/compute/metadata" gcpstorage "cloud.google.com/go/storage" + . "github.com/onsi/gomega" "google.golang.org/api/googleapi" "google.golang.org/api/option" raw "google.golang.org/api/storage/v1" - "gotest.tools/assert" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + sourcev1 "github.com/fluxcd/source-controller/api/v1" testproxy "github.com/fluxcd/source-controller/tests/proxy" ) @@ -81,6 +82,22 @@ var ( } ) +// createTestBucket creates a test bucket for testing purposes +func createTestBucket() *sourcev1.Bucket { + return &sourcev1.Bucket{ + ObjectMeta: v1.ObjectMeta{ + Name: "test-bucket", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + BucketName: bucketName, + Endpoint: "storage.googleapis.com", + Provider: sourcev1.BucketProviderGoogle, + Interval: v1.Duration{Duration: time.Minute * 5}, + }, + } +} + func TestMain(m *testing.M) { hc, host, close = newTestServer(func(w http.ResponseWriter, r *http.Request) { io.Copy(io.Discard, r.Body) @@ -146,52 +163,55 @@ func TestMain(m *testing.M) { } func TestNewClientWithSecretErr(t *testing.T) { - gcpClient, err := NewClient(context.Background(), WithSecret(secret.DeepCopy())) + bucket := createTestBucket() + gcpClient, err := NewClient(context.Background(), bucket, WithSecret(secret.DeepCopy())) t.Log(err) - assert.Error(t, err, "dialing: invalid character 'e' looking for beginning of value") - assert.Assert(t, gcpClient == nil) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal("dialing: invalid character 'e' looking for beginning of value")) + g.Expect(gcpClient).To(BeNil()) } func TestNewClientWithProxyErr(t *testing.T) { _, envADCIsSet := os.LookupEnv(envADC) - assert.Assert(t, !envADCIsSet) - assert.Assert(t, !metadata.OnGCE()) + g := NewWithT(t) + g.Expect(envADCIsSet).To(BeFalse()) + g.Expect(metadata.OnGCE()).To(BeFalse()) - tests := []struct { - name string - opts []Option - err string - }{ - { - name: "invalid secret", - opts: []Option{WithSecret(secret.DeepCopy())}, - err: "failed to create Google credentials from secret: invalid character 'e' looking for beginning of value", - }, - { - name: "attempts default credentials", - err: "failed to create Google HTTP transport: google: could not find default credentials. See https://cloud.google.com/docs/authentication/external/set-up-adc for more information", - }, - } + t.Run("with secret", func(t *testing.T) { + g := NewWithT(t) + bucket := createTestBucket() + gcpClient, err := NewClient(context.Background(), bucket, + WithProxyURL(&url.URL{}), + WithSecret(secret.DeepCopy())) + g.Expect(err).To(HaveOccurred()) + g.Expect(gcpClient).To(BeNil()) + g.Expect(err.Error()).To(Equal("failed to create Google credentials from secret: invalid character 'e' looking for beginning of value")) + }) - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - opts := append([]Option{WithProxyURL(&url.URL{})}, tt.opts...) - gcpClient, err := NewClient(context.Background(), opts...) 
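A comparable sketch for the GCS client changes above, showing the Secret-versus-workload-identity branching that the reconciler performs later in this patch: a JSON service account key from a Secret takes precedence, otherwise the token source behind gcp.WithAuth is used. The helper name is hypothetical; option and type names follow the renamed internal/bucket/gcp package.

    package example

    import (
    	"context"

    	corev1 "k8s.io/api/core/v1"

    	"github.com/fluxcd/pkg/auth"
    	sourcev1 "github.com/fluxcd/source-controller/api/v1"
    	"github.com/fluxcd/source-controller/internal/bucket/gcp"
    )

    // newGCSClient (hypothetical) prefers a service account key from a Secret
    // and otherwise falls back to the token source configured via gcp.WithAuth.
    func newGCSClient(ctx context.Context, obj *sourcev1.Bucket, secret *corev1.Secret, authOpts []auth.Option) (*gcp.GCSClient, error) {
    	var opts []gcp.Option
    	if secret != nil {
    		if err := gcp.ValidateSecret(secret); err != nil {
    			return nil, err
    		}
    		opts = append(opts, gcp.WithSecret(secret))
    	} else {
    		opts = append(opts, gcp.WithAuth(authOpts...))
    	}
    	return gcp.NewClient(ctx, obj, opts...)
    }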
- assert.Error(t, err, tt.err) - assert.Assert(t, gcpClient == nil) - }) - } + t.Run("without secret", func(t *testing.T) { + g := NewWithT(t) + bucket := createTestBucket() + gcpClient, err := NewClient(context.Background(), bucket, + WithProxyURL(&url.URL{})) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(gcpClient).NotTo(BeNil()) + bucketAttrs, err := gcpClient.Client.Bucket("some-bucket").Attrs(context.Background()) + g.Expect(err).To(HaveOccurred()) + g.Expect(bucketAttrs).To(BeNil()) + g.Expect(err.Error()).To(ContainSubstring("failed to create provider access token")) + }) } func TestProxy(t *testing.T) { proxyAddr, proxyPort := testproxy.New(t) err := os.Setenv(envGCSHost, fmt.Sprintf("https://%s", host)) - assert.NilError(t, err) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) defer func() { err := os.Unsetenv(envGCSHost) - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) }() tests := []struct { @@ -223,16 +243,19 @@ func TestProxy(t *testing.T) { return &http.Client{Transport: transport}, nil } }) - gcpClient, err := NewClient(context.Background(), opts...) - assert.NilError(t, err) - assert.Assert(t, gcpClient != nil) + bucket := createTestBucket() + gcpClient, err := NewClient(context.Background(), bucket, opts...) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(gcpClient).NotTo(BeNil()) gcpClient.Client.SetRetry(gcpstorage.WithMaxAttempts(1)) exists, err := gcpClient.BucketExists(context.Background(), bucketName) if tt.err != "" { - assert.ErrorContains(t, err, tt.err) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.err)) } else { - assert.NilError(t, err) - assert.Assert(t, exists) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeTrue()) } }) } @@ -243,8 +266,9 @@ func TestBucketExists(t *testing.T) { Client: client, } exists, err := gcpClient.BucketExists(context.Background(), bucketName) - assert.NilError(t, err) - assert.Assert(t, exists) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeTrue()) } func TestBucketNotExists(t *testing.T) { @@ -253,8 +277,9 @@ func TestBucketNotExists(t *testing.T) { Client: client, } exists, err := gcpClient.BucketExists(context.Background(), bucket) - assert.NilError(t, err) - assert.Assert(t, !exists) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeFalse()) } func TestVisitObjects(t *testing.T) { @@ -268,12 +293,14 @@ func TestVisitObjects(t *testing.T) { etags = append(etags, etag) return nil }) - assert.NilError(t, err) - assert.DeepEqual(t, keys, []string{objectName}) - assert.DeepEqual(t, etags, []string{objectEtag}) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(keys).To(Equal([]string{objectName})) + g.Expect(etags).To(Equal([]string{objectEtag})) } func TestVisitObjectsErr(t *testing.T) { + g := NewWithT(t) gcpClient := &GCSClient{ Client: client, } @@ -281,7 +308,9 @@ func TestVisitObjectsErr(t *testing.T) { err := gcpClient.VisitObjects(context.Background(), badBucketName, "", func(key, etag string) error { return nil }) - assert.Error(t, err, fmt.Sprintf("listing objects from bucket '%s' failed: storage: bucket doesn't exist", badBucketName)) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring( + fmt.Sprintf("listing objects from bucket '%s' failed: storage: bucket doesn't exist", badBucketName))) } func TestVisitObjectsCallbackErr(t *testing.T) { @@ -292,10 +321,13 @@ func TestVisitObjectsCallbackErr(t *testing.T) { err := 
gcpClient.VisitObjects(context.Background(), bucketName, "", func(key, etag string) error { return mockErr }) - assert.Error(t, err, mockErr.Error()) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(mockErr.Error())) } func TestFGetObject(t *testing.T) { + g := NewWithT(t) tempDir := t.TempDir() gcpClient := &GCSClient{ Client: client, @@ -303,33 +335,34 @@ func TestFGetObject(t *testing.T) { localPath := filepath.Join(tempDir, objectName) etag, err := gcpClient.FGetObject(context.Background(), bucketName, objectName, localPath) if err != io.EOF { - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) } - assert.Equal(t, etag, objectEtag) + g.Expect(etag).To(Equal(objectEtag)) } func TestFGetObjectNotExists(t *testing.T) { + g := NewWithT(t) object := "notexists.txt" tempDir := t.TempDir() gcsClient := &GCSClient{ Client: client, } localPath := filepath.Join(tempDir, object) - _, err = gcsClient.FGetObject(context.Background(), bucketName, object, localPath) - if err != io.EOF { - assert.Error(t, err, "storage: object doesn't exist") - assert.Check(t, gcsClient.ObjectIsNotFound(err)) - } + _, err := gcsClient.FGetObject(context.Background(), bucketName, object, localPath) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("storage: object doesn't exist")) } func TestFGetObjectDirectoryIsFileName(t *testing.T) { + g := NewWithT(t) tempDir := t.TempDir() gcpClient := &GCSClient{ Client: client, } _, err = gcpClient.FGetObject(context.Background(), bucketName, objectName, tempDir) if err != io.EOF { - assert.Error(t, err, "filename is a directory") + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal("filename is a directory")) } } @@ -355,10 +388,12 @@ func TestValidateSecret(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() err := ValidateSecret(tt.secret) + g := NewWithT(t) if tt.error { - assert.Error(t, err, fmt.Sprintf("invalid '%v' secret data: required fields 'serviceaccount'", tt.secret.Name)) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(fmt.Sprintf("invalid '%v' secret data: required fields 'serviceaccount'", tt.secret.Name))) } else { - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) } }) } diff --git a/pkg/minio/minio.go b/internal/bucket/minio/minio.go similarity index 88% rename from pkg/minio/minio.go rename to internal/bucket/minio/minio.go index 6c7da9727..026200a83 100644 --- a/pkg/minio/minio.go +++ b/internal/bucket/minio/minio.go @@ -30,6 +30,9 @@ import ( "github.com/minio/minio-go/v7/pkg/s3utils" corev1 "k8s.io/api/core/v1" + "github.com/fluxcd/pkg/auth" + awsauth "github.com/fluxcd/pkg/auth/aws" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) @@ -46,6 +49,7 @@ type options struct { tlsConfig *tls.Config stsTLSConfig *tls.Config proxyURL *url.URL + authOpts []auth.Option } // Option is a function that configures the Minio client. @@ -86,8 +90,15 @@ func WithSTSTLSConfig(tlsConfig *tls.Config) Option { } } +// WithAuth sets the auth options for workload identity authentication. +func WithAuth(authOpts ...auth.Option) Option { + return func(o *options) { + o.authOpts = authOpts + } +} + // NewClient creates a new Minio storage client. 
-func NewClient(bucket *sourcev1.Bucket, opts ...Option) (*MinioClient, error) { +func NewClient(ctx context.Context, bucket *sourcev1.Bucket, opts ...Option) (*MinioClient, error) { var o options for _, opt := range opts { opt(&o) @@ -105,7 +116,11 @@ func NewClient(bucket *sourcev1.Bucket, opts ...Option) (*MinioClient, error) { case o.secret != nil: minioOpts.Creds = newCredsFromSecret(o.secret) case bucketProvider == sourcev1.BucketProviderAmazon: - minioOpts.Creds = newAWSCreds(bucket, o.proxyURL) + creds, err := newAWSCreds(ctx, &o) + if err != nil { + return nil, err + } + minioOpts.Creds = creds case bucketProvider == sourcev1.BucketProviderGeneric: minioOpts.Creds = newGenericCreds(bucket, &o) } @@ -159,23 +174,30 @@ func newCredsFromSecret(secret *corev1.Secret) *credentials.Credentials { } // newAWSCreds creates a new Minio credentials object for `aws` bucket provider. -func newAWSCreds(bucket *sourcev1.Bucket, proxyURL *url.URL) *credentials.Credentials { - stsEndpoint := "" - if sts := bucket.Spec.STS; sts != nil { - stsEndpoint = sts.Endpoint - } - - creds := credentials.NewIAM(stsEndpoint) - if proxyURL != nil { - transport := http.DefaultTransport.(*http.Transport).Clone() - transport.Proxy = http.ProxyURL(proxyURL) - client := &http.Client{Transport: transport} - creds = credentials.New(&credentials.IAM{ - Client: client, - Endpoint: stsEndpoint, - }) +// +// This function is only called when Secret authentication is not available. +// +// Uses AWS SDK's config.LoadDefaultConfig() which supports: +// - Workload Identity (IRSA/EKS Pod Identity) +// - EC2 instance profiles +// - Environment variables +// - Shared credentials files +// - All other AWS SDK authentication methods +func newAWSCreds(ctx context.Context, o *options) (*credentials.Credentials, error) { + var opts auth.Options + opts.Apply(o.authOpts...) + + awsCredsProvider := awsauth.NewCredentialsProvider(ctx, o.authOpts...) + awsCreds, err := awsCredsProvider.Retrieve(ctx) + if err != nil { + return nil, fmt.Errorf("AWS authentication failed: %w", err) } - return creds + + return credentials.NewStaticV4( + awsCreds.AccessKeyID, + awsCreds.SecretAccessKey, + awsCreds.SessionToken, + ), nil } // newGenericCreds creates a new Minio credentials object for the `generic` bucket provider. diff --git a/pkg/minio/minio_test.go b/internal/bucket/minio/minio_test.go similarity index 79% rename from pkg/minio/minio_test.go rename to internal/bucket/minio/minio_test.go index 9a31d49b5..d6ba7baa4 100644 --- a/pkg/minio/minio_test.go +++ b/internal/bucket/minio/minio_test.go @@ -20,7 +20,6 @@ import ( "context" "crypto/tls" "crypto/x509" - "encoding/json" "encoding/xml" "errors" "fmt" @@ -36,9 +35,9 @@ import ( "github.com/google/uuid" miniov7 "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" + . "github.com/onsi/gomega" "github.com/ory/dockertest/v3" "github.com/ory/dockertest/v3/docker" - "gotest.tools/assert" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -52,7 +51,7 @@ import ( const ( objectName string = "test.yaml" - objectEtag string = "2020beab5f1711919157756379622d1d" + objectEtag string = "b07bba5a280b58791bc78fb9fc414b09" ) var ( @@ -76,6 +75,8 @@ var ( testServerCert string // testServerKey is the path to the server key used to start the Minio and STS servers. testServerKey string + // ctx is the common context used in tests. 
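And a hedged sketch for the minio-backed providers: when no Secret is present and the provider is `aws`, the new minio.WithAuth option routes credential resolution through fluxcd/pkg/auth, which is what the newAWSCreds function above consumes. The helper name is hypothetical; the branching mirrors the reconciler changes later in this patch.

    package example

    import (
    	"context"

    	corev1 "k8s.io/api/core/v1"

    	"github.com/fluxcd/pkg/auth"
    	sourcev1 "github.com/fluxcd/source-controller/api/v1"
    	"github.com/fluxcd/source-controller/internal/bucket/minio"
    )

    // newS3Client (hypothetical) mirrors the reconciler's branching for the
    // `aws` bucket provider: static Secret credentials win, otherwise the AWS
    // SDK default credential chain is used via the auth options.
    func newS3Client(ctx context.Context, obj *sourcev1.Bucket, secret *corev1.Secret, authOpts []auth.Option) (*minio.MinioClient, error) {
    	var opts []minio.Option
    	switch {
    	case secret != nil:
    		if err := minio.ValidateSecret(secret); err != nil {
    			return nil, err
    		}
    		opts = append(opts, minio.WithSecret(secret))
    	case obj.Spec.Provider == sourcev1.BucketProviderAmazon:
    		opts = append(opts, minio.WithAuth(authOpts...))
    	}
    	return minio.NewClient(ctx, obj, opts...)
    }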
+ ctx context.Context ) var ( @@ -126,6 +127,9 @@ var ( ) func TestMain(m *testing.M) { + // Initialize common test context + ctx = context.Background() + // Uses a sensible default on Windows (TCP/HTTP) and Linux/MacOS (socket) pool, err := dockertest.NewPool("") if err != nil { @@ -173,7 +177,7 @@ func TestMain(m *testing.M) { testMinioAddress = fmt.Sprintf("127.0.0.1:%v", resource.GetPort("9000/tcp")) // Construct a Minio client using the address of the Minio server. - testMinioClient, err = NewClient(bucketStub(bucket, testMinioAddress), + testMinioClient, err = NewClient(ctx, bucketStub(bucket, testMinioAddress), WithSecret(secret.DeepCopy()), WithTLSConfig(testTLSConfig)) if err != nil { @@ -197,7 +201,6 @@ func TestMain(m *testing.M) { log.Fatalf("could not connect to docker: %s", err) } - ctx := context.Background() createBucket(ctx) addObjectToBucket(ctx) run := m.Run() @@ -208,104 +211,98 @@ func TestMain(m *testing.M) { } func TestNewClient(t *testing.T) { - minioClient, err := NewClient(bucketStub(bucket, testMinioAddress), + minioClient, err := NewClient(ctx, bucketStub(bucket, testMinioAddress), WithSecret(secret.DeepCopy()), WithTLSConfig(testTLSConfig)) - assert.NilError(t, err) - assert.Assert(t, minioClient != nil) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) } func TestNewClientEmptySecret(t *testing.T) { - minioClient, err := NewClient(bucketStub(bucket, testMinioAddress), + minioClient, err := NewClient(ctx, bucketStub(bucket, testMinioAddress), WithSecret(emptySecret.DeepCopy()), WithTLSConfig(testTLSConfig)) - assert.NilError(t, err) - assert.Assert(t, minioClient != nil) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) } -func TestNewClientAwsProvider(t *testing.T) { - minioClient, err := NewClient(bucketStub(bucketAwsProvider, testMinioAddress)) - assert.NilError(t, err) - assert.Assert(t, minioClient != nil) +func TestNewClientAWSProvider(t *testing.T) { + t.Run("with secret", func(t *testing.T) { + validSecret := corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "valid-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte(testMinioRootUser), + "secretkey": []byte(testMinioRootPassword), + }, + Type: "Opaque", + } + + bucket := bucketStub(bucketAwsProvider, testMinioAddress) + minioClient, err := NewClient(ctx, bucket, WithSecret(&validSecret)) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) + }) + + t.Run("without secret", func(t *testing.T) { + bucket := bucketStub(bucketAwsProvider, testMinioAddress) + minioClient, err := NewClient(ctx, bucket) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("AWS authentication failed")) + g.Expect(minioClient).To(BeNil()) + }) } func TestBucketExists(t *testing.T) { - ctx := context.Background() exists, err := testMinioClient.BucketExists(ctx, bucketName) - assert.NilError(t, err) - assert.Assert(t, exists) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeTrue()) } func TestBucketNotExists(t *testing.T) { - ctx := context.Background() exists, err := testMinioClient.BucketExists(ctx, "notexistsbucket") - assert.NilError(t, err) - assert.Assert(t, !exists) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(exists).To(BeFalse()) } func TestFGetObject(t *testing.T) { - ctx := context.Background() tempDir := t.TempDir() path := filepath.Join(tempDir, 
sourceignore.IgnoreFile) _, err := testMinioClient.FGetObject(ctx, bucketName, objectName, path) - assert.NilError(t, err) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) } func TestNewClientAndFGetObjectWithSTSEndpoint(t *testing.T) { - // start a mock AWS STS server - awsSTSListener, awsSTSAddr, awsSTSPort := testlistener.New(t) - awsSTSEndpoint := fmt.Sprintf("http://%s", awsSTSAddr) - awsSTSHandler := http.NewServeMux() - awsSTSHandler.HandleFunc("PUT "+credentials.TokenPath, - func(w http.ResponseWriter, r *http.Request) { - _, err := w.Write([]byte("mock-token")) - assert.NilError(t, err) - }) - awsSTSHandler.HandleFunc("GET "+credentials.DefaultIAMSecurityCredsPath, - func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get(credentials.TokenRequestHeader) - assert.Equal(t, token, "mock-token") - _, err := w.Write([]byte("mock-role")) - assert.NilError(t, err) - }) var credsRetrieved bool - awsSTSHandler.HandleFunc("GET "+credentials.DefaultIAMSecurityCredsPath+"mock-role", - func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get(credentials.TokenRequestHeader) - assert.Equal(t, token, "mock-token") - err := json.NewEncoder(w).Encode(map[string]any{ - "Code": "Success", - "AccessKeyID": testMinioRootUser, - "SecretAccessKey": testMinioRootPassword, - }) - assert.NilError(t, err) - credsRetrieved = true - }) - awsSTSServer := &http.Server{ - Addr: awsSTSAddr, - Handler: awsSTSHandler, - } - go awsSTSServer.Serve(awsSTSListener) - defer awsSTSServer.Shutdown(context.Background()) // start a mock LDAP STS server - ldapSTSListener, ldapSTSAddr, ldapSTSPort := testlistener.New(t) + ldapSTSListener, ldapSTSAddr, _ := testlistener.New(t) ldapSTSEndpoint := fmt.Sprintf("https://%s", ldapSTSAddr) ldapSTSHandler := http.NewServeMux() var ldapUsername, ldapPassword string ldapSTSHandler.HandleFunc("POST /", func(w http.ResponseWriter, r *http.Request) { + g := NewWithT(t) err := r.ParseForm() - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) username := r.Form.Get("LDAPUsername") password := r.Form.Get("LDAPPassword") - assert.Equal(t, username, ldapUsername) - assert.Equal(t, password, ldapPassword) + g.Expect(username).To(Equal(ldapUsername)) + g.Expect(password).To(Equal(ldapPassword)) var result credentials.LDAPIdentityResult result.Credentials.AccessKey = testMinioRootUser result.Credentials.SecretKey = testMinioRootPassword err = xml.NewEncoder(w).Encode(credentials.AssumeRoleWithLDAPResponse{Result: result}) - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) credsRetrieved = true }) ldapSTSServer := &http.Server{ @@ -313,7 +310,7 @@ func TestNewClientAndFGetObjectWithSTSEndpoint(t *testing.T) { Handler: ldapSTSHandler, } go ldapSTSServer.ServeTLS(ldapSTSListener, testServerCert, testServerKey) - defer ldapSTSServer.Shutdown(context.Background()) + defer ldapSTSServer.Shutdown(ctx) // start proxy proxyAddr, proxyPort := testproxy.New(t) @@ -327,42 +324,6 @@ func TestNewClientAndFGetObjectWithSTSEndpoint(t *testing.T) { ldapPassword string err string }{ - { - name: "with correct aws endpoint", - provider: "aws", - stsSpec: &sourcev1.BucketSTSSpec{ - Provider: "aws", - Endpoint: awsSTSEndpoint, - }, - }, - { - name: "with incorrect aws endpoint", - provider: "aws", - stsSpec: &sourcev1.BucketSTSSpec{ - Provider: "aws", - Endpoint: fmt.Sprintf("http://localhost:%d", awsSTSPort+1), - }, - err: "connection refused", - }, - { - name: "with correct aws endpoint and proxy", - provider: "aws", - stsSpec: &sourcev1.BucketSTSSpec{ - 
Provider: "aws", - Endpoint: awsSTSEndpoint, - }, - opts: []Option{WithProxyURL(&url.URL{Scheme: "http", Host: proxyAddr})}, - }, - { - name: "with correct aws endpoint and incorrect proxy", - provider: "aws", - stsSpec: &sourcev1.BucketSTSSpec{ - Provider: "aws", - Endpoint: awsSTSEndpoint, - }, - opts: []Option{WithProxyURL(&url.URL{Scheme: "http", Host: fmt.Sprintf("localhost:%d", proxyPort+1)})}, - err: "connection refused", - }, { name: "with correct ldap endpoint", provider: "generic", @@ -377,7 +338,7 @@ func TestNewClientAndFGetObjectWithSTSEndpoint(t *testing.T) { provider: "generic", stsSpec: &sourcev1.BucketSTSSpec{ Provider: "ldap", - Endpoint: fmt.Sprintf("http://localhost:%d", ldapSTSPort+1), + Endpoint: fmt.Sprintf("http://localhost:%d", 1), }, err: "connection refused", }, @@ -431,7 +392,7 @@ func TestNewClientAndFGetObjectWithSTSEndpoint(t *testing.T) { Provider: "ldap", Endpoint: ldapSTSEndpoint, }, - err: "tls: failed to verify certificate: x509: certificate signed by unknown authority", + err: "tls: failed to verify certificate", }, } @@ -448,18 +409,19 @@ func TestNewClientAndFGetObjectWithSTSEndpoint(t *testing.T) { opts := tt.opts opts = append(opts, WithTLSConfig(testTLSConfig)) - minioClient, err := NewClient(bucket, opts...) - assert.NilError(t, err) - assert.Assert(t, minioClient != nil) + minioClient, err := NewClient(ctx, bucket, opts...) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) - ctx := context.Background() path := filepath.Join(t.TempDir(), sourceignore.IgnoreFile) _, err = minioClient.FGetObject(ctx, bucketName, objectName, path) if tt.err != "" { - assert.ErrorContains(t, err, tt.err) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.err)) } else { - assert.NilError(t, err) - assert.Assert(t, credsRetrieved) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(credsRetrieved).To(BeTrue()) } }) } @@ -487,33 +449,35 @@ func TestNewClientAndFGetObjectWithProxy(t *testing.T) { // run test for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - minioClient, err := NewClient(bucketStub(bucket, testMinioAddress), + minioClient, err := NewClient(ctx, bucketStub(bucket, testMinioAddress), WithSecret(secret.DeepCopy()), WithTLSConfig(testTLSConfig), WithProxyURL(tt.proxyURL)) - assert.NilError(t, err) - assert.Assert(t, minioClient != nil) - ctx := context.Background() + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(minioClient).NotTo(BeNil()) tempDir := t.TempDir() path := filepath.Join(tempDir, sourceignore.IgnoreFile) _, err = minioClient.FGetObject(ctx, bucketName, objectName, path) if tt.errSubstring != "" { - assert.ErrorContains(t, err, tt.errSubstring) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tt.errSubstring)) } else { - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) } }) } } func TestFGetObjectNotExists(t *testing.T) { - ctx := context.Background() tempDir := t.TempDir() badKey := "invalid.txt" path := filepath.Join(tempDir, badKey) _, err := testMinioClient.FGetObject(ctx, bucketName, badKey, path) - assert.Error(t, err, "The specified key does not exist.") - assert.Check(t, testMinioClient.ObjectIsNotFound(err)) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal("The specified key does not exist.")) + g.Expect(testMinioClient.ObjectIsNotFound(err)).To(BeTrue()) } func TestVisitObjects(t *testing.T) { @@ -524,18 +488,20 @@ func TestVisitObjects(t *testing.T) { 
etags = append(etags, etag) return nil }) - assert.NilError(t, err) - assert.DeepEqual(t, keys, []string{objectName}) - assert.DeepEqual(t, etags, []string{objectEtag}) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(keys).To(Equal([]string{objectName})) + g.Expect(etags).To(Equal([]string{objectEtag})) } func TestVisitObjectsErr(t *testing.T) { - ctx := context.Background() badBucketName := "bad-bucket" err := testMinioClient.VisitObjects(ctx, badBucketName, prefix, func(string, string) error { return nil }) - assert.Error(t, err, fmt.Sprintf("listing objects from bucket '%s' failed: The specified bucket does not exist", badBucketName)) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(fmt.Sprintf("listing objects from bucket '%s' failed: The specified bucket does not exist", badBucketName))) } func TestVisitObjectsCallbackErr(t *testing.T) { @@ -543,7 +509,9 @@ func TestVisitObjectsCallbackErr(t *testing.T) { err := testMinioClient.VisitObjects(context.TODO(), bucketName, prefix, func(key, etag string) error { return mockErr }) - assert.Error(t, err, mockErr.Error()) + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(mockErr.Error())) } func TestValidateSecret(t *testing.T) { @@ -571,11 +539,13 @@ func TestValidateSecret(t *testing.T) { tt := testCase t.Run(tt.name, func(t *testing.T) { t.Parallel() + g := NewWithT(t) err := ValidateSecret(tt.secret) if tt.error { - assert.Error(t, err, fmt.Sprintf("invalid '%v' secret data: required fields 'accesskey' and 'secretkey'", tt.secret.Name)) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(fmt.Sprintf("invalid '%v' secret data: required fields 'accesskey' and 'secretkey'", tt.secret.Name))) } else { - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) } }) } @@ -660,11 +630,13 @@ func TestValidateSTSProvider(t *testing.T) { if tt.withCertSecret { sts.CertSecretRef = &meta.LocalObjectReference{} } + g := NewWithT(t) err := ValidateSTSProvider(tt.bucketProvider, sts) if tt.err != "" { - assert.Error(t, err, tt.err) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.err)) } else { - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) } }) } @@ -746,11 +718,13 @@ func TestValidateSTSSecret(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() + g := NewWithT(t) err := ValidateSTSSecret(tt.provider, tt.secret) if tt.err != "" { - assert.Error(t, err, tt.err) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(Equal(tt.err)) } else { - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) } }) } @@ -801,7 +775,7 @@ func removeObjectFromBucket(ctx context.Context) { func getObjectFile() string { return ` - apiVersion: source.toolkit.fluxcd.io/v1beta2 + apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: name: podinfo @@ -817,7 +791,7 @@ func getObjectFile() string { } func loadServerCertAndClientTLSConfig() (serverCert string, serverKey string, clientConf *tls.Config, err error) { - const certsDir = "../../internal/controller/testdata/certs" + const certsDir = "../../controller/testdata/certs" clientConf = &tls.Config{} serverCert, err = filepath.Abs(filepath.Join(certsDir, "server.pem")) diff --git a/internal/controller/artifact.go b/internal/controller/artifact.go index 0de6b3706..bebc8d5ae 100644 --- a/internal/controller/artifact.go +++ b/internal/controller/artifact.go @@ -16,9 +16,11 @@ limitations under the License. 
package controller -import sourcev1 "github.com/fluxcd/source-controller/api/v1" +import ( + "github.com/fluxcd/pkg/apis/meta" +) -type artifactSet []*sourcev1.Artifact +type artifactSet []*meta.Artifact // Diff returns true if any of the revisions in the artifactSet does not match any of the given artifacts. func (s artifactSet) Diff(set artifactSet) bool { diff --git a/internal/controller/artifact_matchers_test.go b/internal/controller/artifact_matchers_test.go index 39f0c9dd7..af716e086 100644 --- a/internal/controller/artifact_matchers_test.go +++ b/internal/controller/artifact_matchers_test.go @@ -19,24 +19,25 @@ package controller import ( "fmt" - sourcev1 "github.com/fluxcd/source-controller/api/v1" . "github.com/onsi/gomega" "github.com/onsi/gomega/types" + + "github.com/fluxcd/pkg/apis/meta" ) // MatchArtifact returns a custom matcher to check equality of a v1beta1.Artifact, the timestamp and URL are ignored. -func MatchArtifact(expected *sourcev1.Artifact) types.GomegaMatcher { +func MatchArtifact(expected *meta.Artifact) types.GomegaMatcher { return &matchArtifact{ expected: expected, } } type matchArtifact struct { - expected *sourcev1.Artifact + expected *meta.Artifact } func (m matchArtifact) Match(actual interface{}) (success bool, err error) { - actualArtifact, ok := actual.(*sourcev1.Artifact) + actualArtifact, ok := actual.(*meta.Artifact) if !ok { return false, fmt.Errorf("actual should be a pointer to an Artifact") } diff --git a/internal/controller/bucket_controller.go b/internal/controller/bucket_controller.go index a1879c456..7fe881be6 100644 --- a/internal/controller/bucket_controller.go +++ b/internal/controller/bucket_controller.go @@ -18,7 +18,7 @@ package controller import ( "context" - stdtls "crypto/tls" + "crypto/tls" "errors" "fmt" "net/url" @@ -44,24 +44,27 @@ import ( eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + "github.com/fluxcd/pkg/cache" "github.com/fluxcd/pkg/runtime/conditions" helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/jitter" "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" rreconcile "github.com/fluxcd/pkg/runtime/reconcile" + "github.com/fluxcd/pkg/runtime/secrets" "github.com/fluxcd/pkg/sourceignore" sourcev1 "github.com/fluxcd/source-controller/api/v1" - intdigest "github.com/fluxcd/source-controller/internal/digest" + "github.com/fluxcd/source-controller/internal/bucket/azure" + "github.com/fluxcd/source-controller/internal/bucket/gcp" + "github.com/fluxcd/source-controller/internal/bucket/minio" serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/index" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/internal/reconcile/summarize" - "github.com/fluxcd/source-controller/internal/tls" - "github.com/fluxcd/source-controller/pkg/azure" - 
"github.com/fluxcd/source-controller/pkg/gcp" - "github.com/fluxcd/source-controller/pkg/minio" ) // maxConcurrentBucketFetches is the upper bound on the goroutines used to @@ -77,7 +80,7 @@ import ( const maxConcurrentBucketFetches = 100 // bucketReadyCondition contains the information required to summarize a -// v1beta2.Bucket Ready Condition. +// v1.Bucket Ready Condition. var bucketReadyCondition = summarize.Conditions{ Target: meta.ReadyCondition, Owned: []string{ @@ -116,15 +119,18 @@ var bucketFailConditions = []string{ // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=serviceaccounts/token,verbs=create -// BucketReconciler reconciles a v1beta2.Bucket object. +// BucketReconciler reconciles a v1.Bucket object. type BucketReconciler struct { client.Client kuberecorder.EventRecorder helper.Metrics - Storage *Storage + Storage *storage.Storage ControllerName string + TokenCache *cache.TokenCache patchOptions []patch.Option } @@ -155,7 +161,16 @@ type BucketProvider interface { Close(context.Context) } -// bucketReconcileFunc is the function type for all the v1beta2.Bucket +// bucketCredentials contains all credentials and configuration needed for bucket providers. +type bucketCredentials struct { + secret *corev1.Secret + proxyURL *url.URL + tlsConfig *tls.Config + stsSecret *corev1.Secret + stsTLSConfig *tls.Config +} + +// bucketReconcileFunc is the function type for all the v1.Bucket // (sub)reconcile functions. The type implementations are grouped and // executed serially to perform the complete reconcile of the object. type bucketReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) @@ -399,7 +414,7 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.Seria if artifactMissing { msg += ": disappeared from storage" } - rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, msg) + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) @@ -418,165 +433,62 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.Seria // reconcileSource fetches the upstream bucket contents with the client for the // given object's Provider, and returns the result. // When a SecretRef is defined, it attempts to fetch the Secret before calling -// the provider. If this fails, it records v1beta2.FetchFailedCondition=True on +// the provider. If this fails, it records v1.FetchFailedCondition=True on // the object and returns early. 
func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) { - secret, err := r.getSecret(ctx, obj.Spec.SecretRef, obj.GetNamespace()) - if err != nil { - e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) - // Return error as the world as observed may change - return sreconcile.ResultEmpty, e + usesObjectLevelWorkloadIdentity := obj.Spec.Provider != "" && obj.Spec.Provider != sourcev1.BucketProviderGeneric && obj.Spec.ServiceAccountName != "" + if usesObjectLevelWorkloadIdentity { + if !auth.IsObjectLevelWorkloadIdentityEnabled() { + const gate = auth.FeatureGateObjectLevelWorkloadIdentity + const msgFmt = "to use spec.serviceAccountName for provider authentication please enable the %s feature gate in the controller" + err := fmt.Errorf(msgFmt, gate) + e := serror.NewStalling(err, meta.FeatureGateDisabledReason) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } } - proxyURL, err := r.getProxyURL(ctx, obj) + + creds, err := r.setupCredentials(ctx, obj) if err != nil { e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - // Construct provider client - var provider BucketProvider - switch obj.Spec.Provider { - case sourcev1.BucketProviderGoogle: - if err = gcp.ValidateSecret(secret); err != nil { - e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) - return sreconcile.ResultEmpty, e - } - var opts []gcp.Option - if secret != nil { - opts = append(opts, gcp.WithSecret(secret)) - } - if proxyURL != nil { - opts = append(opts, gcp.WithProxyURL(proxyURL)) - } - if provider, err = gcp.NewClient(ctx, opts...); err != nil { - e := serror.NewGeneric(err, "ClientError") - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) - return sreconcile.ResultEmpty, e - } - case sourcev1.BucketProviderAzure: - if err = azure.ValidateSecret(secret); err != nil { - e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) - return sreconcile.ResultEmpty, e - } - var opts []azure.Option - if secret != nil { - opts = append(opts, azure.WithSecret(secret)) - } - if proxyURL != nil { - opts = append(opts, azure.WithProxyURL(proxyURL)) - } - if provider, err = azure.NewClient(obj, opts...); err != nil { - e := serror.NewGeneric(err, "ClientError") - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) - return sreconcile.ResultEmpty, e - } - default: - if err = minio.ValidateSecret(secret); err != nil { - e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) - return sreconcile.ResultEmpty, e - } - tlsConfig, err := r.getTLSConfig(ctx, obj.Spec.CertSecretRef, obj.GetNamespace(), obj.Spec.Endpoint) - if err != nil { - e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) - return sreconcile.ResultEmpty, e - } - stsSecret, err := r.getSTSSecret(ctx, obj) - if err != nil { - e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) - 
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) - return sreconcile.ResultEmpty, e - } - stsTLSConfig, err := r.getSTSTLSConfig(ctx, obj) - if err != nil { - err := fmt.Errorf("failed to get STS TLS config: %w", err) + provider, err := r.createBucketProvider(ctx, obj, creds) + if err != nil { + var stallingErr *serror.Stalling + var genericErr *serror.Generic + if errors.As(err, &stallingErr) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, stallingErr.Reason, "%s", stallingErr) + return sreconcile.ResultEmpty, stallingErr + } else if errors.As(err, &genericErr) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, genericErr.Reason, "%s", genericErr) + return sreconcile.ResultEmpty, genericErr + } else { e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - if sts := obj.Spec.STS; sts != nil { - if err := minio.ValidateSTSProvider(obj.Spec.Provider, sts); err != nil { - e := serror.NewStalling(err, sourcev1.InvalidSTSConfigurationReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) - return sreconcile.ResultEmpty, e - } - if _, err := url.Parse(sts.Endpoint); err != nil { - err := fmt.Errorf("failed to parse STS endpoint '%s': %w", sts.Endpoint, err) - e := serror.NewStalling(err, sourcev1.URLInvalidReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) - return sreconcile.ResultEmpty, e - } - if err := minio.ValidateSTSSecret(sts.Provider, stsSecret); err != nil { - e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) - return sreconcile.ResultEmpty, e - } - } - var opts []minio.Option - if secret != nil { - opts = append(opts, minio.WithSecret(secret)) - } - if tlsConfig != nil { - opts = append(opts, minio.WithTLSConfig(tlsConfig)) - } - if proxyURL != nil { - opts = append(opts, minio.WithProxyURL(proxyURL)) - } - if stsSecret != nil { - opts = append(opts, minio.WithSTSSecret(stsSecret)) - } - if stsTLSConfig != nil { - opts = append(opts, minio.WithSTSTLSConfig(stsTLSConfig)) - } - if provider, err = minio.NewClient(obj, opts...); err != nil { - e := serror.NewGeneric(err, "ClientError") - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) - return sreconcile.ResultEmpty, e - } } - - // Fetch etag index - if err = fetchEtagIndex(ctx, provider, obj, index, dir); err != nil { + changed, err := r.syncBucketArtifacts(ctx, provider, obj, index, dir) + if err != nil { e := serror.NewGeneric(err, sourcev1.BucketOperationFailedReason) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - // Check if index has changed compared to current Artifact revision. - var changed bool - if artifact := obj.Status.Artifact; artifact != nil && artifact.Revision != "" { - curRev := digest.Digest(artifact.Revision) - changed = curRev.Validate() != nil || curRev != index.Digest(curRev.Algorithm()) - } - - // Fetch the bucket objects if required to. 
- if artifact := obj.GetArtifact(); artifact == nil || changed { - // Mark observations about the revision on the object - defer func() { - // As fetchIndexFiles can make last-minute modifications to the etag - // index, we need to re-calculate the revision at the end - revision := index.Digest(intdigest.Canonical) - - message := fmt.Sprintf("new upstream revision '%s'", revision) - if obj.GetArtifact() != nil { - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message) - } - rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) - if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to patch") - return - } - }() - - if err = fetchIndexFiles(ctx, provider, obj, index, dir); err != nil { - e := serror.NewGeneric(err, sourcev1.BucketOperationFailedReason) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) - return sreconcile.ResultEmpty, e + // Update artifact status if changes were detected + if changed { + revision := index.Digest(intdigest.Canonical) + message := fmt.Sprintf("new upstream revision '%s'", revision) + if obj.GetArtifact() != nil { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message) + } + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) + if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to patch") + return sreconcile.ResultEmpty, err } } @@ -588,7 +500,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial // (Status) data on the object does not match the given. // // The inspection of the given data to the object is differed, ensuring any -// stale observations like v1beta2.ArtifactOutdatedCondition are removed. +// stale observations like v1.ArtifactOutdatedCondition are removed. // If the given Artifact does not differ from the object's current, it returns // early. // On a successful archive, the Artifact in the Status of the object is set, @@ -696,6 +608,10 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bu // Remove our finalizer from the list controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + // Cleanup caches. + r.TokenCache.DeleteEventsForObject(sourcev1.BucketKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile) + // Stop reconciliation as the object is being deleted return sreconcile.ResultEmpty, nil } @@ -729,92 +645,13 @@ func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Buc } if len(delFiles) > 0 { r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) + "garbage collected %d artifacts", len(delFiles)) return nil } } return nil } -// getSecret attempts to fetch a Secret reference if specified. It returns any client error. 
-func (r *BucketReconciler) getSecret(ctx context.Context, secretRef *meta.LocalObjectReference, - namespace string) (*corev1.Secret, error) { - if secretRef == nil { - return nil, nil - } - secretName := types.NamespacedName{ - Namespace: namespace, - Name: secretRef.Name, - } - secret := &corev1.Secret{} - if err := r.Get(ctx, secretName, secret); err != nil { - return nil, fmt.Errorf("failed to get secret '%s': %w", secretName.String(), err) - } - return secret, nil -} - -// getTLSConfig attempts to fetch a TLS configuration from the given -// Secret reference, namespace and endpoint. -func (r *BucketReconciler) getTLSConfig(ctx context.Context, - secretRef *meta.LocalObjectReference, namespace, endpoint string) (*stdtls.Config, error) { - certSecret, err := r.getSecret(ctx, secretRef, namespace) - if err != nil || certSecret == nil { - return nil, err - } - tlsConfig, _, err := tls.KubeTLSClientConfigFromSecret(*certSecret, endpoint) - if err != nil { - return nil, fmt.Errorf("failed to create TLS config: %w", err) - } - if tlsConfig == nil { - return nil, fmt.Errorf("certificate secret does not contain any TLS configuration") - } - return tlsConfig, nil -} - -// getProxyURL attempts to fetch a proxy URL from the object's proxy secret -// reference. -func (r *BucketReconciler) getProxyURL(ctx context.Context, obj *sourcev1.Bucket) (*url.URL, error) { - namespace := obj.GetNamespace() - proxySecret, err := r.getSecret(ctx, obj.Spec.ProxySecretRef, namespace) - if err != nil || proxySecret == nil { - return nil, err - } - proxyData := proxySecret.Data - address, ok := proxyData["address"] - if !ok { - return nil, fmt.Errorf("invalid proxy secret '%s/%s': key 'address' is missing", - namespace, obj.Spec.ProxySecretRef.Name) - } - proxyURL, err := url.Parse(string(address)) - if err != nil { - return nil, fmt.Errorf("failed to parse proxy address '%s': %w", address, err) - } - user, hasUser := proxyData["username"] - password, hasPassword := proxyData["password"] - if hasUser || hasPassword { - proxyURL.User = url.UserPassword(string(user), string(password)) - } - return proxyURL, nil -} - -// getSTSSecret attempts to fetch the secret from the object's STS secret -// reference. -func (r *BucketReconciler) getSTSSecret(ctx context.Context, obj *sourcev1.Bucket) (*corev1.Secret, error) { - if obj.Spec.STS == nil { - return nil, nil - } - return r.getSecret(ctx, obj.Spec.STS.SecretRef, obj.GetNamespace()) -} - -// getSTSTLSConfig attempts to fetch the certificate secret from the object's -// STS configuration. -func (r *BucketReconciler) getSTSTLSConfig(ctx context.Context, obj *sourcev1.Bucket) (*stdtls.Config, error) { - if obj.Spec.STS == nil { - return nil, nil - } - return r.getTLSConfig(ctx, obj.Spec.STS.CertSecretRef, obj.GetNamespace(), obj.Spec.STS.Endpoint) -} - // eventLogf records events, and logs at the same time. // // This log is different from the debug log in the EventRecorder, in the sense @@ -943,3 +780,206 @@ func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1 return nil } + +// setupCredentials retrieves and validates secrets for authentication, TLS configuration, and proxy settings. +// It returns all credentials needed for bucket providers. 
+func (r *BucketReconciler) setupCredentials(ctx context.Context, obj *sourcev1.Bucket) (*bucketCredentials, error) { + var secret *corev1.Secret + if obj.Spec.SecretRef != nil { + secretName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.SecretRef.Name, + } + secret = &corev1.Secret{} + if err := r.Get(ctx, secretName, secret); err != nil { + return nil, fmt.Errorf("failed to get secret '%s': %w", secretName, err) + } + } + + var stsSecret *corev1.Secret + if obj.Spec.STS != nil && obj.Spec.STS.SecretRef != nil { + secretName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.STS.SecretRef.Name, + } + stsSecret = &corev1.Secret{} + if err := r.Get(ctx, secretName, stsSecret); err != nil { + return nil, fmt.Errorf("failed to get STS secret '%s': %w", secretName, err) + } + } + + var ( + err error + proxyURL *url.URL + tlsConfig *tls.Config + stsTLSConfig *tls.Config + ) + + if obj.Spec.ProxySecretRef != nil { + secretRef := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.ProxySecretRef.Name, + } + proxyURL, err = secrets.ProxyURLFromSecretRef(ctx, r.Client, secretRef) + if err != nil { + return nil, fmt.Errorf("failed to get proxy URL: %w", err) + } + } + + if obj.Spec.CertSecretRef != nil { + secretRef := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.CertSecretRef.Name, + } + tlsConfig, err = secrets.TLSConfigFromSecretRef(ctx, r.Client, secretRef, secrets.WithSystemCertPool()) + if err != nil { + return nil, fmt.Errorf("failed to get TLS config: %w", err) + } + } + + if obj.Spec.STS != nil && obj.Spec.STS.CertSecretRef != nil { + secretRef := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.STS.CertSecretRef.Name, + } + stsTLSConfig, err = secrets.TLSConfigFromSecretRef(ctx, r.Client, secretRef, secrets.WithSystemCertPool()) + if err != nil { + return nil, fmt.Errorf("failed to get STS TLS config: %w", err) + } + } + + return &bucketCredentials{ + secret: secret, + proxyURL: proxyURL, + tlsConfig: tlsConfig, + stsSecret: stsSecret, + stsTLSConfig: stsTLSConfig, + }, nil +} + +// createBucketProvider creates a provider-specific bucket client using the given credentials and configuration. +// It handles different bucket providers (AWS, GCP, Azure, generic) and returns the appropriate client. 
+func (r *BucketReconciler) createBucketProvider(ctx context.Context, obj *sourcev1.Bucket, creds *bucketCredentials) (BucketProvider, error) { + authOpts := []auth.Option{ + auth.WithClient(r.Client), + auth.WithServiceAccountNamespace(obj.GetNamespace()), + } + + if obj.Spec.ServiceAccountName != "" { + authOpts = append(authOpts, auth.WithServiceAccountName(obj.Spec.ServiceAccountName)) + } + + if r.TokenCache != nil { + involvedObject := cache.InvolvedObject{ + Kind: sourcev1.BucketKind, + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + Operation: cache.OperationReconcile, + } + authOpts = append(authOpts, auth.WithCache(*r.TokenCache, involvedObject)) + } + + if creds.proxyURL != nil { + authOpts = append(authOpts, auth.WithProxyURL(*creds.proxyURL)) + } + + if obj.Spec.Region != "" { + authOpts = append(authOpts, auth.WithSTSRegion(obj.Spec.Region)) + } + + if sts := obj.Spec.STS; sts != nil { + authOpts = append(authOpts, auth.WithSTSEndpoint(sts.Endpoint)) + } + + switch obj.Spec.Provider { + case sourcev1.BucketProviderGoogle: + var opts []gcp.Option + if creds.proxyURL != nil { + opts = append(opts, gcp.WithProxyURL(creds.proxyURL)) + } + + if creds.secret != nil { + if err := gcp.ValidateSecret(creds.secret); err != nil { + return nil, err + } + opts = append(opts, gcp.WithSecret(creds.secret)) + } else { + opts = append(opts, gcp.WithAuth(authOpts...)) + } + + return gcp.NewClient(ctx, obj, opts...) + + case sourcev1.BucketProviderAzure: + if err := azure.ValidateSecret(creds.secret); err != nil { + return nil, err + } + var opts []azure.Option + if creds.secret != nil { + opts = append(opts, azure.WithSecret(creds.secret)) + } + if creds.proxyURL != nil { + opts = append(opts, azure.WithProxyURL(creds.proxyURL)) + } + opts = append(opts, azure.WithAuth(authOpts...)) + return azure.NewClient(ctx, obj, opts...) + + default: + if err := minio.ValidateSecret(creds.secret); err != nil { + return nil, err + } + if sts := obj.Spec.STS; sts != nil { + if err := minio.ValidateSTSProvider(obj.Spec.Provider, sts); err != nil { + return nil, serror.NewStalling(err, sourcev1.InvalidSTSConfigurationReason) + } + if _, err := url.Parse(sts.Endpoint); err != nil { + return nil, serror.NewStalling(fmt.Errorf("failed to parse STS endpoint '%s': %w", sts.Endpoint, err), sourcev1.URLInvalidReason) + } + if err := minio.ValidateSTSSecret(sts.Provider, creds.stsSecret); err != nil { + return nil, serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) + } + } + var opts []minio.Option + if creds.secret != nil { + opts = append(opts, minio.WithSecret(creds.secret)) + } else if obj.Spec.Provider == sourcev1.BucketProviderAmazon { + opts = append(opts, minio.WithAuth(authOpts...)) + } + if creds.tlsConfig != nil { + opts = append(opts, minio.WithTLSConfig(creds.tlsConfig)) + } + if creds.proxyURL != nil { + opts = append(opts, minio.WithProxyURL(creds.proxyURL)) + } + if creds.stsSecret != nil { + opts = append(opts, minio.WithSTSSecret(creds.stsSecret)) + } + if creds.stsTLSConfig != nil { + opts = append(opts, minio.WithSTSTLSConfig(creds.stsTLSConfig)) + } + return minio.NewClient(ctx, obj, opts...) + } +} + +// syncBucketArtifacts handles etag index retrieval and bucket object fetching. +// It fetches the etag index from the provider and downloads objects to the specified directory. +// Returns true if changes were detected and artifacts were updated. 
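Note: a rough sketch of how setupCredentials, createBucketProvider, and the syncBucketArtifacts helper defined next are expected to chain together in the Bucket reconcile path. The actual call site in reconcileSource is outside this diff, and the variable names (obj, index, dir) merely follow the signatures above, so treat this as an assumption rather than the implementation:

// Assumed wiring of the helpers introduced above (not the verbatim call site).
creds, err := r.setupCredentials(ctx, obj)
if err != nil {
	return sreconcile.ResultEmpty, err // typically surfaced as AuthenticationFailed
}
provider, err := r.createBucketProvider(ctx, obj, creds)
if err != nil {
	return sreconcile.ResultEmpty, err
}
changed, err := r.syncBucketArtifacts(ctx, provider, obj, index, dir)
if err != nil {
	return sreconcile.ResultEmpty, err
}
if changed {
	// A new revision was fetched into dir; the artifact is rebuilt downstream.
}

The syncBucketArtifacts implementation follows.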
+func (r *BucketReconciler) syncBucketArtifacts(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, dir string) (bool, error) { + if err := fetchEtagIndex(ctx, provider, obj, index, dir); err != nil { + return false, err + } + var changed bool + if artifact := obj.Status.Artifact; artifact != nil && artifact.Revision != "" { + curRev := digest.Digest(artifact.Revision) + changed = curRev.Validate() != nil || curRev != index.Digest(curRev.Algorithm()) + } + + // Fetch the bucket objects if required to. + if artifact := obj.GetArtifact(); artifact == nil || changed { + if err := fetchIndexFiles(ctx, provider, obj, index, dir); err != nil { + return false, err + } + return true, nil + } + + return false, nil +} diff --git a/internal/controller/bucket_controller_fetch_test.go b/internal/controller/bucket_controller_fetch_test.go index ead96fb99..707d645f3 100644 --- a/internal/controller/bucket_controller_fetch_test.go +++ b/internal/controller/bucket_controller_fetch_test.go @@ -24,7 +24,7 @@ import ( "testing" "time" - "gotest.tools/assert" + . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" sourcev1 "github.com/fluxcd/source-controller/api/v1" @@ -119,7 +119,8 @@ func Test_fetchEtagIndex(t *testing.T) { t.Fatal(err) } - assert.Equal(t, index.Len(), 3) + g := NewWithT(t) + g.Expect(index.Len()).To(Equal(3)) }) t.Run("an error while bucket does not exist", func(t *testing.T) { @@ -129,7 +130,9 @@ func Test_fetchEtagIndex(t *testing.T) { index := index.NewDigester() err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp) - assert.ErrorContains(t, err, "not found") + g := NewWithT(t) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("not found")) }) t.Run("filters with .sourceignore rules", func(t *testing.T) { @@ -153,7 +156,8 @@ func Test_fetchEtagIndex(t *testing.T) { if ok := index.Has("foo.txt"); ok { t.Error(fmt.Errorf("expected 'foo.txt' index item to not exist")) } - assert.Equal(t, index.Len(), 1) + g := NewWithT(t) + g.Expect(index.Len()).To(Equal(1)) }) t.Run("filters with ignore rules from object", func(t *testing.T) { @@ -177,7 +181,8 @@ func Test_fetchEtagIndex(t *testing.T) { t.Error(err) } - assert.Equal(t, index.Len(), 1) + g := NewWithT(t) + g.Expect(index.Len()).To(Equal(1)) if ok := index.Has("foo.txt"); !ok { t.Error(fmt.Errorf("expected 'foo.txt' index item to exist")) } @@ -243,7 +248,8 @@ func Test_fetchFiles(t *testing.T) { t.Fatal(err) } f := index.Get("foo.yaml") - assert.Equal(t, f, "etag2") + g := NewWithT(t) + g.Expect(f).To(Equal("etag2")) }) t.Run("a disappeared index entry is removed from the index", func(t *testing.T) { @@ -262,8 +268,9 @@ func Test_fetchFiles(t *testing.T) { t.Fatal(err) } f := index.Get("foo.yaml") - assert.Equal(t, f, "etag1") - assert.Check(t, !index.Has("bar.yaml")) + g := NewWithT(t) + g.Expect(f).To(Equal("etag1")) + g.Expect(index.Has("bar.yaml")).To(BeFalse()) }) t.Run("can fetch more than maxConcurrentFetches", func(t *testing.T) { diff --git a/internal/controller/bucket_controller_test.go b/internal/controller/bucket_controller_test.go index 7563d6e99..00ed46cb7 100644 --- a/internal/controller/bucket_controller_test.go +++ b/internal/controller/bucket_controller_test.go @@ -38,13 +38,15 @@ import ( kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + 
"github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" "github.com/fluxcd/pkg/runtime/conditions" conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" "github.com/fluxcd/pkg/runtime/jitter" "github.com/fluxcd/pkg/runtime/patch" sourcev1 "github.com/fluxcd/source-controller/api/v1" - intdigest "github.com/fluxcd/source-controller/internal/digest" "github.com/fluxcd/source-controller/internal/index" gcsmock "github.com/fluxcd/source-controller/internal/mock/gcs" s3mock "github.com/fluxcd/source-controller/internal/mock/s3" @@ -196,20 +198,20 @@ func TestBucketReconciler_Reconcile(t *testing.T) { func TestBucketReconciler_reconcileStorage(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *sourcev1.Bucket, storage *Storage) error + beforeFunc func(obj *sourcev1.Bucket, storage *storage.Storage) error want sreconcile.Result wantErr bool - assertArtifact *sourcev1.Artifact + assertArtifact *meta.Artifact assertConditions []metav1.Condition assertPaths []string }{ { name: "garbage collects", - beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { revisions := []string{"a", "b", "c", "d"} for n := range revisions { v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), Revision: v, } @@ -227,7 +229,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") return nil }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/d.txt", Revision: "d", Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", @@ -255,8 +257,8 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { }, { name: "notices missing artifact in storage", - beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/invalid.txt", Revision: "d", } @@ -274,10 +276,10 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { }, { name: "notices empty artifact digest", - beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { f := "empty-digest.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -305,10 +307,10 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { }, { name: "notices artifact digest mismatch", - beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + beforeFunc: func(obj *sourcev1.Bucket, storage *storage.Storage) error { f := "digest-mismatch.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -336,8 +338,8 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { }, { name: "updates hostname on diff from current", - beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.Bucket, storage 
*storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -356,7 +358,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { assertPaths: []string{ "/reconcile-storage/hostname.txt", }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -437,6 +439,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { bucketObjects []*s3mock.Object middleware http.Handler secret *corev1.Secret + serviceAccount *corev1.ServiceAccount beforeFunc func(obj *sourcev1.Bucket) want sreconcile.Result wantErr bool @@ -522,7 +525,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { wantErr: true, assertIndex: index.NewDigester(), assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get TLS config: secret '/dummy' not found"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, @@ -547,7 +550,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "certificate secret does not contain any TLS configuration"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get TLS config: secret '/dummy' must contain either 'ca.crt' or both 'tls.crt' and 'tls.key'"), }, }, { @@ -563,7 +566,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { wantErr: true, assertIndex: index.NewDigester(), assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy' not found"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, @@ -575,6 +578,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "dummy", }, + Data: map[string][]byte{}, }, beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ @@ -588,7 +592,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "invalid proxy secret '/dummy': key 'address' is missing"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get 
proxy URL: secret '/dummy': key 'address' not found"), }, }, { @@ -604,7 +608,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { wantErr: true, assertIndex: index.NewDigester(), assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get STS secret '/dummy': secrets \"dummy\" not found"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, @@ -648,7 +652,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { wantErr: true, assertIndex: index.NewDigester(), assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get STS TLS config: secret '/dummy' not found"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, @@ -676,7 +680,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get STS TLS config: certificate secret does not contain any TLS configuration"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get STS TLS config: secret '/dummy' must contain either 'ca.crt' or both 'tls.crt' and 'tls.key'"), }, }, { @@ -823,7 +827,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { name: "Up-to-date artifact", bucketName: "dummy", beforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: "sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479", } conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -881,7 +885,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { }, }, beforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: "some-path", Revision: "some-rev", } @@ -909,6 +913,10 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { clientBuilder.WithObjects(tt.secret) } + if tt.serviceAccount != nil { + clientBuilder.WithObjects(tt.serviceAccount) + } + r := &BucketReconciler{ EventRecorder: record.NewFakeRecorder(32), Client: clientBuilder.Build(), @@ -971,15 +979,17 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { tests := []struct { - name string - bucketName string - bucketObjects []*gcsmock.Object - secret *corev1.Secret - beforeFunc func(obj *sourcev1.Bucket) - want sreconcile.Result - wantErr bool - assertIndex *index.Digester - assertConditions []metav1.Condition + name string + bucketName string + bucketObjects []*gcsmock.Object + secret *corev1.Secret + serviceAccount *corev1.ServiceAccount + 
beforeFunc func(obj *sourcev1.Bucket) + want sreconcile.Result + wantErr bool + assertIndex *index.Digester + assertConditions []metav1.Condition + disableObjectLevelWorkloadIdentity bool }{ { name: "Reconciles GCS source", @@ -1073,7 +1083,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { wantErr: true, assertIndex: index.NewDigester(), assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy' not found"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, @@ -1097,7 +1107,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { wantErr: true, assertIndex: index.NewDigester(), assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "invalid proxy secret '/dummy': key 'address' is missing"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get proxy URL: secret '/dummy': key 'address' not found"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, @@ -1209,7 +1219,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { name: "Up-to-date artifact", bucketName: "dummy", beforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: "sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479", } conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -1267,7 +1277,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { }, }, beforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: "some-path", Revision: "some-rev", } @@ -1282,6 +1292,80 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), }, }, + { + name: "GCS Object-Level Workload Identity (no secret)", + bucketName: "dummy", + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + Generation: 3, + }, + }, + serviceAccount: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ServiceAccountName = "test-sa" + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "GCS Controller-Level Workload Identity (no secret, no SA)", + 
bucketName: "dummy", + bucketObjects: []*gcsmock.Object{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + Generation: 3, + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + // ServiceAccountName is not set (Controller-Level) + }, + want: sreconcile.ResultSuccess, + assertIndex: index.NewDigester(index.WithIndex(map[string]string{ + "test.txt": "098f6bcd4621d373cade4e832627b4f6", + })), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"), + }, + }, + { + name: "GCS Object-Level fails when feature gate disabled", + bucketName: "dummy", + serviceAccount: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.ServiceAccountName = "test-sa" + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertIndex: index.NewDigester(), + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FeatureGateDisabledReason, "to use spec.serviceAccountName for provider authentication please enable the ObjectLevelWorkloadIdentity feature gate in the controller"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), + }, + disableObjectLevelWorkloadIdentity: true, + }, // TODO: Middleware for mock server to test authentication using secret. } for _, tt := range tests { @@ -1296,12 +1380,23 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { clientBuilder.WithObjects(tt.secret) } + + if tt.serviceAccount != nil { + clientBuilder.WithObjects(tt.serviceAccount) + } + r := &BucketReconciler{ EventRecorder: record.NewFakeRecorder(32), Client: clientBuilder.Build(), Storage: testStorage, patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), } + + // Handle ObjectLevelWorkloadIdentity feature gate + if !tt.disableObjectLevelWorkloadIdentity { + auth.EnableObjectLevelWorkloadIdentity() + t.Cleanup(auth.DisableObjectLevelWorkloadIdentity) + } + + tmpDir := t.TempDir() // Test bucket object.
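Note: the two feature-gate test cases above assume a guard in the Bucket reconciler analogous to the one added to getAuthOpts in the GitRepository controller later in this diff. A hedged sketch of that guard, since the Bucket-side call site itself is not shown here:

// Sketch of the object-level workload identity guard assumed by the tests above;
// the exact Bucket-side implementation may differ.
if obj.Spec.ServiceAccountName != "" && !auth.IsObjectLevelWorkloadIdentityEnabled() {
	const gate = auth.FeatureGateObjectLevelWorkloadIdentity
	const msgFmt = "to use spec.serviceAccountName for provider authentication " +
		"please enable the %s feature gate in the controller"
	err := serror.NewStalling(fmt.Errorf(msgFmt, gate), meta.FeatureGateDisabledReason)
	conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FeatureGateDisabledReason, "%s", err)
	return nil, err
}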
@@ -1393,7 +1488,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { revision := index.Digest(intdigest.Canonical) obj.Spec.Interval = metav1.Duration{Duration: interval} // Incomplete artifact - obj.Status.Artifact = &sourcev1.Artifact{Revision: revision.String()} + obj.Status.Artifact = &meta.Artifact{Revision: revision.String()} conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") }, @@ -1503,7 +1598,6 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-bucket-", Generation: 1, - Namespace: "default", }, Spec: sourcev1.BucketSpec{ Timeout: &metav1.Duration{Duration: timeout}, @@ -1657,7 +1751,7 @@ func TestBucketReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, newObjBeforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} }, wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from", }, @@ -1666,12 +1760,12 @@ func TestBucketReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, newObjBeforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal Succeeded stored artifact with 2 fetched files from", @@ -1681,12 +1775,12 @@ func TestBucketReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, newObjBeforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Digest: "bbb"} + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from", @@ -1696,11 +1790,11 @@ func TestBucketReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, newObjBeforeFunc: func(obj *sourcev1.Bucket) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, }, @@ -1751,191 +1845,6 @@ func TestBucketReconciler_notify(t *testing.T) { } } -func 
TestBucketReconciler_getProxyURL(t *testing.T) { - tests := []struct { - name string - bucket *sourcev1.Bucket - objects []client.Object - expectedURL string - expectedErr string - }{ - { - name: "empty proxySecretRef", - bucket: &sourcev1.Bucket{ - Spec: sourcev1.BucketSpec{ - ProxySecretRef: nil, - }, - }, - }, - { - name: "non-existing proxySecretRef", - bucket: &sourcev1.Bucket{ - Spec: sourcev1.BucketSpec{ - ProxySecretRef: &meta.LocalObjectReference{ - Name: "non-existing", - }, - }, - }, - expectedErr: "failed to get secret '/non-existing': secrets \"non-existing\" not found", - }, - { - name: "missing address in proxySecretRef", - bucket: &sourcev1.Bucket{ - Spec: sourcev1.BucketSpec{ - ProxySecretRef: &meta.LocalObjectReference{ - Name: "dummy", - }, - }, - }, - objects: []client.Object{ - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - Data: map[string][]byte{}, - }, - }, - expectedErr: "invalid proxy secret '/dummy': key 'address' is missing", - }, - { - name: "invalid address in proxySecretRef", - bucket: &sourcev1.Bucket{ - Spec: sourcev1.BucketSpec{ - ProxySecretRef: &meta.LocalObjectReference{ - Name: "dummy", - }, - }, - }, - objects: []client.Object{ - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - Data: map[string][]byte{ - "address": {0x7f}, - }, - }, - }, - expectedErr: "failed to parse proxy address '\x7f': parse \"\\x7f\": net/url: invalid control character in URL", - }, - { - name: "no user, no password", - bucket: &sourcev1.Bucket{ - Spec: sourcev1.BucketSpec{ - ProxySecretRef: &meta.LocalObjectReference{ - Name: "dummy", - }, - }, - }, - objects: []client.Object{ - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - Data: map[string][]byte{ - "address": []byte("http://proxy.example.com"), - }, - }, - }, - expectedURL: "http://proxy.example.com", - }, - { - name: "user, no password", - bucket: &sourcev1.Bucket{ - Spec: sourcev1.BucketSpec{ - ProxySecretRef: &meta.LocalObjectReference{ - Name: "dummy", - }, - }, - }, - objects: []client.Object{ - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - Data: map[string][]byte{ - "address": []byte("http://proxy.example.com"), - "username": []byte("user"), - }, - }, - }, - expectedURL: "http://user:@proxy.example.com", - }, - { - name: "no user, password", - bucket: &sourcev1.Bucket{ - Spec: sourcev1.BucketSpec{ - ProxySecretRef: &meta.LocalObjectReference{ - Name: "dummy", - }, - }, - }, - objects: []client.Object{ - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - Data: map[string][]byte{ - "address": []byte("http://proxy.example.com"), - "password": []byte("password"), - }, - }, - }, - expectedURL: "http://:password@proxy.example.com", - }, - { - name: "user, password", - bucket: &sourcev1.Bucket{ - Spec: sourcev1.BucketSpec{ - ProxySecretRef: &meta.LocalObjectReference{ - Name: "dummy", - }, - }, - }, - objects: []client.Object{ - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - Data: map[string][]byte{ - "address": []byte("http://proxy.example.com"), - "username": []byte("user"), - "password": []byte("password"), - }, - }, - }, - expectedURL: "http://user:password@proxy.example.com", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - c := fakeclient.NewClientBuilder(). - WithScheme(testEnv.Scheme()). - WithObjects(tt.objects...). 
- Build() - - r := &BucketReconciler{ - Client: c, - } - - u, err := r.getProxyURL(ctx, tt.bucket) - if tt.expectedErr == "" { - g.Expect(err).To(BeNil()) - } else { - g.Expect(err.Error()).To(ContainSubstring(tt.expectedErr)) - } - if tt.expectedURL == "" { - g.Expect(u).To(BeNil()) - } else { - g.Expect(u.String()).To(Equal(tt.expectedURL)) - } - }) - } -} - func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { tests := []struct { name string diff --git a/internal/controller/gitrepository_controller.go b/internal/controller/gitrepository_controller.go index 6b68af55b..1208c8ae0 100644 --- a/internal/controller/gitrepository_controller.go +++ b/internal/controller/gitrepository_controller.go @@ -27,9 +27,11 @@ import ( "time" securejoin "github.com/cyphar/filepath-securejoin" - "github.com/fluxcd/pkg/auth/azure" + "github.com/fluxcd/pkg/auth" + authutils "github.com/fluxcd/pkg/auth/utils" "github.com/fluxcd/pkg/git/github" "github.com/fluxcd/pkg/runtime/logger" + "github.com/fluxcd/pkg/runtime/secrets" "github.com/go-git/go-git/v5/plumbing/transport" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -47,6 +49,7 @@ import ( eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" "github.com/fluxcd/pkg/cache" "github.com/fluxcd/pkg/git" "github.com/fluxcd/pkg/git/gogit" @@ -57,7 +60,6 @@ import ( "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" rreconcile "github.com/fluxcd/pkg/runtime/reconcile" - "github.com/fluxcd/pkg/sourceignore" sourcev1 "github.com/fluxcd/source-controller/api/v1" @@ -69,7 +71,7 @@ import ( ) // gitRepositoryReadyCondition contains the information required to summarize a -// v1beta2.GitRepository Ready Condition. +// v1.GitRepository Ready Condition. var gitRepositoryReadyCondition = summarize.Conditions{ Target: meta.ReadyCondition, Owned: []string{ @@ -124,31 +126,29 @@ func getPatchOptions(ownedConditions []string, controllerName string) []patch.Op // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/finalizers,verbs=get;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=events,verbs=create;patch -// GitRepositoryReconciler reconciles a v1beta2.GitRepository object. +// GitRepositoryReconciler reconciles a v1.GitRepository object. type GitRepositoryReconciler struct { client.Client kuberecorder.EventRecorder helper.Metrics - Storage *Storage + Storage *storage.Storage ControllerName string + TokenCache *cache.TokenCache requeueDependency time.Duration features map[string]bool patchOptions []patch.Option - - tokenCache *cache.TokenCache } type GitRepositoryReconcilerOptions struct { DependencyRequeueInterval time.Duration RateLimiter workqueue.TypedRateLimiter[reconcile.Request] - TokenCache *cache.TokenCache } // gitRepositoryReconcileFunc is the function type for all the -// v1beta2.GitRepository (sub)reconcile functions. +// v1.GitRepository (sub)reconcile functions. 
type gitRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) func (r *GitRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { @@ -164,8 +164,6 @@ func (r *GitRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, o r.features = features.FeatureGates() } - r.tokenCache = opts.TokenCache - return ctrl.NewControllerManagedBy(mgr). For(&sourcev1.GitRepository{}, builder.WithPredicates( predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), @@ -429,7 +427,7 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patc if artifactMissing { msg += ": disappeared from storage" } - rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, msg) + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) @@ -450,23 +448,23 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patc // // The included repositories are fetched and their metadata are stored. In case // one of the included repositories isn't ready, it records -// v1beta2.IncludeUnavailableCondition=True and returns early. When all the +// v1.IncludeUnavailableCondition=True and returns early. When all the // included repositories are ready, it removes -// v1beta2.IncludeUnavailableCondition from the object. +// v1.IncludeUnavailableCondition from the object. // When the included artifactSet differs from the current set in the Status of -// the object, it marks the object with v1beta2.ArtifactOutdatedCondition=True. +// the object, it marks the object with v1.ArtifactOutdatedCondition=True. // The repository is cloned to the given dir, using the specified configuration // to check out the reference. In case of an error during this process -// (including transient errors), it records v1beta2.FetchFailedCondition=True +// (including transient errors), it records v1.FetchFailedCondition=True // and returns early. -// On a successful checkout, it removes v1beta2.FetchFailedCondition and +// On a successful checkout, it removes v1.FetchFailedCondition and // compares the current revision of HEAD to the revision of the Artifact in the -// Status of the object. It records v1beta2.ArtifactOutdatedCondition=True when +// Status of the object. It records v1.ArtifactOutdatedCondition=True when // they differ. // If specified, the signature of the Git commit is verified. If the signature // can not be verified or the verification fails, it records -// v1beta2.SourceVerifiedCondition=False and returns early. When successful, -// it records v1beta2.SourceVerifiedCondition=True. +// v1.SourceVerifiedCondition=False and returns early. When successful, +// it records v1.SourceVerifiedCondition=True. // When all the above is successful, the given Commit pointer is set to the // commit of the checked out Git repository. 
// @@ -485,9 +483,14 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch } var proxyOpts *transport.ProxyOptions + var proxyURL *url.URL if obj.Spec.ProxySecretRef != nil { var err error - proxyOpts, err = r.getProxyOpts(ctx, obj.Spec.ProxySecretRef.Name, obj.GetNamespace()) + secretRef := types.NamespacedName{ + Name: obj.Spec.ProxySecretRef.Name, + Namespace: obj.GetNamespace(), + } + proxyURL, err = secrets.ProxyURLFromSecretRef(ctx, r.Client, secretRef) if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to configure proxy options: %w", err), @@ -497,6 +500,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch // Return error as the world as observed may change return sreconcile.ResultEmpty, e } + proxyOpts = &transport.ProxyOptions{URL: proxyURL.String()} } u, err := url.Parse(obj.Spec.URL) @@ -509,7 +513,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch return sreconcile.ResultEmpty, e } - authOpts, err := r.getAuthOpts(ctx, obj, *u) + authOpts, err := r.getAuthOpts(ctx, obj, *u, proxyURL) if err != nil { // Return error as the world as observed may change return sreconcile.ResultEmpty, err @@ -590,6 +594,16 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch ctrl.LoggerFrom(ctx).V(logger.DebugLevel).Info("git repository checked out", "url", obj.Spec.URL, "revision", commitReference(obj, commit)) conditions.Delete(obj, sourcev1.FetchFailedCondition) + // Validate sparse checkout paths after successful checkout. + if err := r.validateSparseCheckoutPaths(ctx, obj, dir); err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to sparse checkout directories : %w", err), + sourcev1.GitOperationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } + // Verify commit signature if result, err := r.verifySignature(ctx, obj, *commit); err != nil || result == sreconcile.ResultEmpty { return result, err @@ -609,35 +623,16 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch return sreconcile.ResultSuccess, nil } -// getProxyOpts fetches the secret containing the proxy settings, constructs a -// transport.ProxyOptions object using those settings and then returns it. -func (r *GitRepositoryReconciler) getProxyOpts(ctx context.Context, proxySecretName, - proxySecretNamespace string) (*transport.ProxyOptions, error) { - proxyData, err := r.getSecretData(ctx, proxySecretName, proxySecretNamespace) - if err != nil { - return nil, fmt.Errorf("failed to get proxy secret '%s/%s': %w", proxySecretNamespace, proxySecretName, err) - } - address, ok := proxyData["address"] - if !ok { - return nil, fmt.Errorf("invalid proxy secret '%s/%s': key 'address' is missing", proxySecretNamespace, proxySecretName) - } - - proxyOpts := &transport.ProxyOptions{ - URL: string(address), - Username: string(proxyData["username"]), - Password: string(proxyData["password"]), - } - return proxyOpts, nil -} - // getAuthOpts fetches the secret containing the auth options (if specified), // constructs a git.AuthOptions object using those options along with the provided // URL and returns it. 
-func (r *GitRepositoryReconciler) getAuthOpts(ctx context.Context, obj *sourcev1.GitRepository, u url.URL) (*git.AuthOptions, error) { +func (r *GitRepositoryReconciler) getAuthOpts(ctx context.Context, obj *sourcev1.GitRepository, + u url.URL, proxyURL *url.URL) (*git.AuthOptions, error) { + var secret *corev1.Secret var authData map[string][]byte if obj.Spec.SecretRef != nil { var err error - authData, err = r.getSecretData(ctx, obj.Spec.SecretRef.Name, obj.GetNamespace()) + secret, err = r.getSecret(ctx, obj.Spec.SecretRef.Name, obj.GetNamespace()) if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to get secret '%s/%s': %w", obj.GetNamespace(), obj.Spec.SecretRef.Name, err), @@ -646,10 +641,11 @@ func (r *GitRepositoryReconciler) getAuthOpts(ctx context.Context, obj *sourcev1 conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return nil, e } + authData = secret.Data } // Configure authentication strategy to access the source - authOpts, err := git.NewAuthOptions(u, authData) + opts, err := git.NewAuthOptions(u, authData) if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to configure authentication options: %w", err), @@ -659,14 +655,44 @@ func (r *GitRepositoryReconciler) getAuthOpts(ctx context.Context, obj *sourcev1 return nil, e } - // Configure provider authentication if specified in spec - switch obj.GetProvider() { - case sourcev1.GitProviderAzure: - authOpts.ProviderOpts = &git.ProviderOptions{ - Name: sourcev1.GitProviderAzure, - AzureOpts: []azure.OptFunc{ - azure.WithAzureDevOpsScope(), - }, + // Configure provider authentication if specified. + var getCreds func() (*authutils.GitCredentials, error) + switch provider := obj.GetProvider(); provider { + case sourcev1.GitProviderAzure: // If AWS or GCP are added in the future they can be added here separated by a comma. + getCreds = func() (*authutils.GitCredentials, error) { + opts := []auth.Option{ + auth.WithClient(r.Client), + auth.WithServiceAccountNamespace(obj.GetNamespace()), + } + + if obj.Spec.ServiceAccountName != "" { + // Check object-level workload identity feature gate. + if !auth.IsObjectLevelWorkloadIdentityEnabled() { + const gate = auth.FeatureGateObjectLevelWorkloadIdentity + const msgFmt = "to use spec.serviceAccountName for provider authentication please enable the %s feature gate in the controller" + err := serror.NewStalling(fmt.Errorf(msgFmt, gate), meta.FeatureGateDisabledReason) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FeatureGateDisabledReason, "%s", err) + return nil, err + } + // Set ServiceAccountName only if explicitly specified + opts = append(opts, auth.WithServiceAccountName(obj.Spec.ServiceAccountName)) + } + + if r.TokenCache != nil { + involvedObject := cache.InvolvedObject{ + Kind: sourcev1.GitRepositoryKind, + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + Operation: cache.OperationReconcile, + } + opts = append(opts, auth.WithCache(*r.TokenCache, involvedObject)) + } + + if proxyURL != nil { + opts = append(opts, auth.WithProxyURL(*proxyURL)) + } + + return authutils.GetGitCredentials(ctx, provider, opts...) 
} case sourcev1.GitProviderGitHub: // if provider is github, but secret ref is not specified @@ -678,18 +704,49 @@ func (r *GitRepositoryReconciler) getAuthOpts(ctx context.Context, obj *sourcev1 conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return nil, e } + authMethods, err := secrets.AuthMethodsFromSecret(ctx, secret, secrets.WithTLSSystemCertPool()) + if err != nil { + return nil, err + } + if !authMethods.HasGitHubAppData() { + e := serror.NewGeneric( + fmt.Errorf("secretRef with github app data must be specified when provider is set to github"), + sourcev1.InvalidProviderConfigurationReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + getCreds = func() (*authutils.GitCredentials, error) { + var appOpts []github.OptFunc - authOpts.ProviderOpts = &git.ProviderOptions{ - Name: sourcev1.GitProviderGitHub, - GitHubOpts: []github.OptFunc{ - github.WithAppData(authData), - github.WithCache(r.tokenCache, sourcev1.GitRepositoryKind, obj.GetName(), obj.GetNamespace()), - }, + appOpts = append(appOpts, github.WithAppData(authMethods.GitHubAppData)) + + if proxyURL != nil { + appOpts = append(appOpts, github.WithProxyURL(proxyURL)) + } + + if r.TokenCache != nil { + appOpts = append(appOpts, github.WithCache(r.TokenCache, sourcev1.GitRepositoryKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile)) + } + + if authMethods.HasTLS() { + appOpts = append(appOpts, github.WithTLSConfig(authMethods.TLS)) + } + + username, password, err := github.GetCredentials(ctx, appOpts...) + if err != nil { + return nil, err + } + return &authutils.GitCredentials{ + Username: username, + Password: password, + }, nil } default: // analyze secret, if it has github app data, perhaps provider should have been github. 
- if appID := authData[github.AppIDKey]; len(appID) != 0 { - e := serror.NewStalling( + if appID := authData[github.KeyAppID]; len(appID) != 0 { + e := serror.NewGeneric( fmt.Errorf("secretRef '%s/%s' has github app data but provider is not set to github", obj.GetNamespace(), obj.Spec.SecretRef.Name), sourcev1.InvalidProviderConfigurationReason, ) @@ -697,26 +754,46 @@ func (r *GitRepositoryReconciler) getAuthOpts(ctx context.Context, obj *sourcev1 return nil, e } } - return authOpts, nil + if getCreds != nil { + creds, err := getCreds() + if err != nil { + // Check if it's already a structured error and preserve it + switch err.(type) { + case *serror.Stalling, *serror.Generic: + return nil, err + } + + e := serror.NewGeneric( + fmt.Errorf("failed to configure authentication options: %w", err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return nil, e + } + opts.BearerToken = creds.BearerToken + opts.Username = creds.Username + opts.Password = creds.Password + } + return opts, nil } -func (r *GitRepositoryReconciler) getSecretData(ctx context.Context, name, namespace string) (map[string][]byte, error) { +func (r *GitRepositoryReconciler) getSecret(ctx context.Context, name, namespace string) (*corev1.Secret, error) { key := types.NamespacedName{ Namespace: namespace, Name: name, } - var secret corev1.Secret - if err := r.Client.Get(ctx, key, &secret); err != nil { - return nil, err + secret := &corev1.Secret{} + if err := r.Client.Get(ctx, key, secret); err != nil { + return nil, fmt.Errorf("failed to get secret '%s/%s': %w", namespace, name, err) } - return secret.Data, nil + return secret, nil } // reconcileArtifact archives a new Artifact to the Storage, if the current // (Status) data on the object does not match the given. // // The inspection of the given data to the object is differed, ensuring any -// stale observations like v1beta2.ArtifactOutdatedCondition are removed. +// stale observations like v1.ArtifactOutdatedCondition are removed. // If the given Artifact and/or artifactSet (includes) and observed artifact // content config do not differ from the object's current, it returns early. // Source ignore patterns are loaded, and the given directory is archived while @@ -797,7 +874,7 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat } // Archive directory to storage - if err := r.Storage.Archive(&artifact, dir, SourceIgnoreFilter(ps, ignoreDomain)); err != nil { + if err := r.Storage.Archive(&artifact, dir, storage.SourceIgnoreFilter(ps, ignoreDomain)); err != nil { e := serror.NewGeneric( fmt.Errorf("unable to archive artifact to storage: %w", err), sourcev1.ArchiveOperationFailedReason, @@ -812,6 +889,7 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat obj.Status.ObservedIgnore = obj.Spec.Ignore obj.Status.ObservedRecurseSubmodules = obj.Spec.RecurseSubmodules obj.Status.ObservedInclude = obj.Spec.Include + obj.Status.ObservedSparseCheckout = obj.Spec.SparseCheckout // Remove the deprecated symlink. // TODO(hidde): remove 2 minor versions from introduction of v1. @@ -831,15 +909,15 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat } // reconcileInclude reconciles the on the object specified -// v1beta2.GitRepositoryInclude list by copying their Artifact (sub)contents to +// v1.GitRepositoryInclude list by copying their Artifact (sub)contents to // the specified paths in the given directory. 
// // When one of the includes is unavailable, it marks the object with -// v1beta2.IncludeUnavailableCondition=True and returns early. +// v1.IncludeUnavailableCondition=True and returns early. // When the copy operations are successful, it removes the -// v1beta2.IncludeUnavailableCondition from the object. +// v1.IncludeUnavailableCondition from the object. // When the composed artifactSet differs from the current set in the Status of -// the object, it marks the object with v1beta2.ArtifactOutdatedCondition=True. +// the object, it marks the object with v1.ArtifactOutdatedCondition=True. func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.GitRepository, _ *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) { @@ -859,7 +937,7 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, sp *patc // such that the index of artifactSet matches with the index of Include. // Hence, index is used here to pick the associated artifact from // includes. - var artifact *sourcev1.Artifact + var artifact *meta.Artifact for j, art := range *includes { if i == j { artifact = art @@ -884,6 +962,7 @@ func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, sp *patc // performs a git checkout. func (r *GitRepositoryReconciler) gitCheckout(ctx context.Context, obj *sourcev1.GitRepository, authOpts *git.AuthOptions, proxyOpts *transport.ProxyOptions, dir string, optimized bool) (*git.Commit, error) { + // Configure checkout strategy. cloneOpts := repository.CloneConfig{ RecurseSubmodules: obj.Spec.RecurseSubmodules, @@ -896,7 +975,14 @@ func (r *GitRepositoryReconciler) gitCheckout(ctx context.Context, obj *sourcev1 cloneOpts.SemVer = ref.SemVer cloneOpts.RefName = ref.Name } - + if obj.Spec.SparseCheckout != nil { + // Trim any leading "./" in the directory paths since underlying go-git API does not honor them. + sparseCheckoutDirs := make([]string, len(obj.Spec.SparseCheckout)) + for i, path := range obj.Spec.SparseCheckout { + sparseCheckoutDirs[i] = strings.TrimPrefix(path, "./") + } + cloneOpts.SparseCheckoutDirectories = sparseCheckoutDirs + } // Only if the object has an existing artifact in storage, attempt to // short-circuit clone operation. reconcileStorage has already verified // that the artifact exists. @@ -980,10 +1066,10 @@ func (r *GitRepositoryReconciler) fetchIncludes(ctx context.Context, obj *source // verifySignature verifies the signature of the given Git commit and/or its referencing tag // depending on the verification mode specified on the object. // If the signature can not be verified or the verification fails, it records -// v1beta2.SourceVerifiedCondition=False and returns. -// When successful, it records v1beta2.SourceVerifiedCondition=True. +// v1.SourceVerifiedCondition=False and returns. +// When successful, it records v1.SourceVerifiedCondition=True. // If no verification mode is specified on the object, the -// v1beta2.SourceVerifiedCondition Condition is removed. +// v1.SourceVerifiedCondition Condition is removed. 
func (r *GitRepositoryReconciler) verifySignature(ctx context.Context, obj *sourcev1.GitRepository, commit git.Commit) (sreconcile.Result, error) { // Check if there is a commit verification is configured and remove any old // observations if there is none @@ -1079,7 +1165,7 @@ func (r *GitRepositoryReconciler) verifySignature(ctx context.Context, obj *sour mode := obj.Spec.Verification.GetMode() obj.Status.SourceVerificationMode = &mode conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, reason, "%s", message.String()) - r.eventLogf(ctx, obj, eventv1.EventTypeTrace, reason, message.String()) + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, reason, "%s", message.String()) return sreconcile.ResultSuccess, nil } @@ -1097,7 +1183,8 @@ func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sour controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) // Cleanup caches. - r.tokenCache.DeleteEventsForObject(sourcev1.GitRepositoryKind, obj.GetName(), obj.GetNamespace()) + r.TokenCache.DeleteEventsForObject(sourcev1.GitRepositoryKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile) // Stop reconciliation as the object is being deleted return sreconcile.ResultEmpty, nil @@ -1132,7 +1219,7 @@ func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourc } if len(delFiles) > 0 { r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) + "garbage collected %d artifacts", len(delFiles)) return nil } } @@ -1172,10 +1259,18 @@ func gitContentConfigChanged(obj *sourcev1.GitRepository, includes *artifactSet) if requiresVerification(obj) { return true } + if len(obj.Spec.SparseCheckout) != len(obj.Status.ObservedSparseCheckout) { + return true + } + for index, dir := range obj.Spec.SparseCheckout { + if dir != obj.Status.ObservedSparseCheckout[index] { + return true + } + } // Convert artifactSet to index addressable artifacts and ensure that it and // the included artifacts include all the include from the spec. - artifacts := []*sourcev1.Artifact(*includes) + artifacts := []*meta.Artifact(*includes) if len(obj.Spec.Include) != len(artifacts) { return true } @@ -1206,6 +1301,19 @@ func gitContentConfigChanged(obj *sourcev1.GitRepository, includes *artifactSet) return false } +// validateSparseCheckoutPaths checks if the sparse checkout paths exist in the cloned repository. +func (r *GitRepositoryReconciler) validateSparseCheckoutPaths(ctx context.Context, obj *sourcev1.GitRepository, dir string) error { + if obj.Spec.SparseCheckout != nil { + for _, path := range obj.Spec.SparseCheckout { + fullPath := filepath.Join(dir, path) + if _, err := os.Lstat(fullPath); err != nil { + return fmt.Errorf("sparse checkout dir '%s' does not exist in repository: %w", path, err) + } + } + } + return nil +} + // Returns true if both GitRepositoryIncludes are equal. 
func gitRepositoryIncludeEqual(a, b sourcev1.GitRepositoryInclude) bool { if a.GitRepositoryRef != b.GitRepositoryRef { diff --git a/internal/controller/gitrepository_controller_fuzz_test.go b/internal/controller/gitrepository_controller_fuzz_test.go index 1751d096e..c9c136820 100644 --- a/internal/controller/gitrepository_controller_fuzz_test.go +++ b/internal/controller/gitrepository_controller_fuzz_test.go @@ -59,6 +59,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/manager" + intstorage "github.com/fluxcd/pkg/artifact/storage" "github.com/fluxcd/pkg/gittestserver" "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/testenv" @@ -77,7 +78,7 @@ var ( cfg *rest.Config testEnv *testenv.Environment - storage *Storage + storage *intstorage.Storage examplePublicKey []byte examplePrivateKey []byte @@ -477,7 +478,7 @@ func startEnvServer(setupReconcilers func(manager.Manager)) *envtest.Environment panic(err) } defer os.RemoveAll(tmpStoragePath) - storage, err = NewStorage(tmpStoragePath, "localhost:5050", time.Minute*1, 2) + storage, err = intstorage.New(tmpStoragePath, "localhost:5050", time.Minute*1, 2) if err != nil { panic(err) } diff --git a/internal/controller/gitrepository_controller_test.go b/internal/controller/gitrepository_controller_test.go index fde0262f1..f9f7a591d 100644 --- a/internal/controller/gitrepository_controller_test.go +++ b/internal/controller/gitrepository_controller_test.go @@ -18,6 +18,7 @@ package controller import ( "context" + "encoding/json" "errors" "fmt" "net/http" @@ -33,7 +34,6 @@ import ( "github.com/go-git/go-git/v5/config" "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/storage/memory" .
"github.com/onsi/gomega" sshtestdata "golang.org/x/crypto/ssh/testdata" @@ -48,6 +48,8 @@ import ( kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" "github.com/fluxcd/pkg/git" "github.com/fluxcd/pkg/git/github" "github.com/fluxcd/pkg/gittestserver" @@ -348,6 +350,8 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { server options secret *corev1.Secret beforeFunc func(obj *sourcev1.GitRepository) + secretFunc func(secret *corev1.Secret, baseURL string) + middlewareFunc gittestserver.HTTPMiddleware want sreconcile.Result wantErr bool assertConditions []metav1.Condition @@ -386,6 +390,63 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), }, }, + { + name: "HTTPS with mutual TLS makes Reconciling=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mtls-certs", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + "tls.crt": clientPublicKey, + "tls.key": clientPrivateKey, + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "mtls-certs"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, + { + name: "HTTPS with mutual TLS and invalid private key makes CheckoutFailed=True and returns error", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-mtls-certs", + }, + Data: map[string][]byte{ + "ca.crt": tlsCA, + "tls.crt": clientPublicKey, + "tls.key": []byte("invalid"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-mtls-certs"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "tls: failed to find any PEM data in key input"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, { name: "HTTPS with CAFile secret makes Reconciling=True", protocol: "https", @@ -470,6 +531,85 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), }, }, + { + name: "mTLS GitHub App without ca.crt makes FetchFailed=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "gh-app-no-ca"}, 
+ Data: map[string][]byte{ + github.KeyAppID: []byte("123"), + github.KeyAppInstallationID: []byte("456"), + github.KeyAppPrivateKey: sshtestdata.PEMBytes["rsa"], + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "gh-app-no-ca"} + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingWithRetryReason, "foo") + }, + secretFunc: func(secret *corev1.Secret, baseURL string) { + secret.Data[github.KeyAppBaseURL] = []byte(baseURL + "/api/v3") + }, + wantErr: true, + assertConditions: []metav1.Condition{ + // should record a FetchFailedCondition due to TLS handshake + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "x509: "), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingWithRetryReason, "foo"), + }, + }, + { + name: "mTLS GitHub App with ca.crt makes Reconciling=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + username: github.AccessTokenUsername, + password: "some-enterprise-token", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "gh-app-ca"}, + Data: map[string][]byte{ + github.KeyAppID: []byte("123"), + github.KeyAppInstallationID: []byte("456"), + github.KeyAppPrivateKey: sshtestdata.PEMBytes["rsa"], + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "gh-app-ca"} + }, + secretFunc: func(secret *corev1.Secret, baseURL string) { + secret.Data[github.KeyAppBaseURL] = []byte(baseURL + "/api/v3") + secret.Data["ca.crt"] = tlsCA + }, + middlewareFunc: func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.URL.Path, "/api/v3/app/installations/") { + w.WriteHeader(http.StatusOK) + tok := &github.AppToken{ + Token: "some-enterprise-token", + ExpiresAt: time.Now().Add(time.Hour), + } + _ = json.NewEncoder(w).Encode(tok) + } + handler.ServeHTTP(w, r) + }) + }, + wantErr: false, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new upstream revision 'master@sha1:'"), + }, + }, // TODO: Add test case for HTTPS with bearer token auth secret. It // depends on gitkit to have support for bearer token based // authentication. 
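Editor's note: the test cases above exercise GitHub App authentication against a TLS-protected (GitHub Enterprise style) endpoint using the renamed key constants from github.com/fluxcd/pkg/git/github. A rough sketch of the secret layout those constants imply is shown below; the App/installation IDs, base URL, and CA bundle are placeholders, not values from this change.

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/fluxcd/pkg/git/github"
)

// githubAppSecret assembles the Secret a GitRepository with
// spec.provider=github is expected to reference: App ID, installation ID and
// private key under the Key* constants, an optional API base URL for GitHub
// Enterprise, and an optional CA bundle for the TLS connection.
func githubAppSecret(name string, privateKeyPEM, caPEM []byte) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Data: map[string][]byte{
			github.KeyAppID:             []byte("123"),                              // placeholder
			github.KeyAppInstallationID: []byte("456"),                              // placeholder
			github.KeyAppPrivateKey:     privateKeyPEM,                              // PEM-encoded PKCS1/PKCS8 key
			github.KeyAppBaseURL:        []byte("https://ghe.example.com/api/v3"),   // placeholder GHE API endpoint
			"ca.crt":                    caPEM,                                      // optional CA for the TLS handshake
		},
	}
}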
@@ -559,7 +699,7 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} obj.Status = sourcev1.GitRepositoryStatus{ - Artifact: &sourcev1.Artifact{ + Artifact: &meta.Artifact{ Revision: "staging/some-revision", Path: randStringRunes(10), }, @@ -600,7 +740,7 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { Name: "github-app-secret", }, Data: map[string][]byte{ - github.AppIDKey: []byte("1111"), + github.KeyAppID: []byte("1111"), }, }, beforeFunc: func(obj *sourcev1.GitRepository) { @@ -616,6 +756,34 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), }, }, + { + // This test is only for verifying the failure state when using + // provider auth. Protocol http is used for simplicity. + name: "github provider without github app data in secret makes FetchFailed=True", + protocol: "http", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "github-basic-auth", + }, + Data: map[string][]byte{ + "username": []byte("abc"), + "password": []byte("1234"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "github-basic-auth"} + obj.Spec.Provider = sourcev1.GitProviderGitHub + conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") + conditions.MarkUnknown(obj, meta.ReadyCondition, meta.ProgressingReason, "foo") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.InvalidProviderConfigurationReason, "secretRef with github app data must be specified when provider is set to github"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "foo"), + }, + }, } for _, tt := range tests { @@ -638,6 +806,10 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { defer os.RemoveAll(server.Root()) server.AutoCreate() + if tt.middlewareFunc != nil { + server.AddHTTPMiddlewares(tt.middlewareFunc) + } + repoPath := "/test.git" localRepo, err := initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) g.Expect(err).NotTo(HaveOccurred()) @@ -682,6 +854,10 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { tt.beforeFunc(obj) } + if tt.secretFunc != nil { + tt.secretFunc(secret, server.HTTPAddress()) + } + clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). 
WithStatusSubresource(&sourcev1.GitRepository{}) @@ -730,12 +906,11 @@ func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { func TestGitRepositoryReconciler_getAuthOpts_provider(t *testing.T) { tests := []struct { - name string - url string - secret *corev1.Secret - beforeFunc func(obj *sourcev1.GitRepository) - wantProviderOptsName string - wantErr error + name string + url string + secret *corev1.Secret + beforeFunc func(obj *sourcev1.GitRepository) + wantErr string }{ { name: "azure provider", @@ -743,7 +918,16 @@ func TestGitRepositoryReconciler_getAuthOpts_provider(t *testing.T) { beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.Provider = sourcev1.GitProviderAzure }, - wantProviderOptsName: sourcev1.GitProviderAzure, + wantErr: "ManagedIdentityCredential", + }, + { + name: "azure provider with service account and feature gate for object-level identity disabled", + url: "https://dev.azure.com/foo/bar/_git/baz", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderAzure + obj.Spec.ServiceAccountName = "azure-sa" + }, + wantErr: auth.FeatureGateObjectLevelWorkloadIdentity, }, { name: "github provider with no secret ref", @@ -751,8 +935,7 @@ func TestGitRepositoryReconciler_getAuthOpts_provider(t *testing.T) { beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.Provider = sourcev1.GitProviderGitHub }, - wantProviderOptsName: sourcev1.GitProviderGitHub, - wantErr: errors.New("secretRef with github app data must be specified when provider is set to github"), + wantErr: "secretRef with github app data must be specified when provider is set to github", }, { name: "github provider with github app data in secret", @@ -762,9 +945,9 @@ func TestGitRepositoryReconciler_getAuthOpts_provider(t *testing.T) { Name: "githubAppSecret", }, Data: map[string][]byte{ - github.AppIDKey: []byte("123"), - github.AppInstallationIDKey: []byte("456"), - github.AppPrivateKey: []byte("abc"), + github.KeyAppID: []byte("123"), + github.KeyAppInstallationID: []byte("456"), + github.KeyAppPrivateKey: []byte("abc"), }, }, beforeFunc: func(obj *sourcev1.GitRepository) { @@ -773,7 +956,7 @@ func TestGitRepositoryReconciler_getAuthOpts_provider(t *testing.T) { Name: "githubAppSecret", } }, - wantProviderOptsName: sourcev1.GitProviderGitHub, + wantErr: "Key must be a PEM encoded PKCS1 or PKCS8 key", }, { name: "generic provider with github app data in secret", @@ -783,7 +966,7 @@ func TestGitRepositoryReconciler_getAuthOpts_provider(t *testing.T) { Name: "githubAppSecret", }, Data: map[string][]byte{ - github.AppIDKey: []byte("123"), + github.KeyAppID: []byte("123"), }, }, beforeFunc: func(obj *sourcev1.GitRepository) { @@ -792,7 +975,27 @@ func TestGitRepositoryReconciler_getAuthOpts_provider(t *testing.T) { Name: "githubAppSecret", } }, - wantErr: errors.New("secretRef '/githubAppSecret' has github app data but provider is not set to github"), + wantErr: "secretRef '/githubAppSecret' has github app data but provider is not set to github", + }, + { + name: "github provider with basic auth secret", + url: "https://github.com/org/repo.git", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "basic-auth-secret", + }, + Data: map[string][]byte{ + "username": []byte("abc"), + "password": []byte("1234"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Provider = sourcev1.GitProviderGitHub + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "basic-auth-secret", + } + }, + wantErr: "secretRef 
with github app data must be specified when provider is set to github", }, { name: "generic provider", @@ -809,7 +1012,7 @@ func TestGitRepositoryReconciler_getAuthOpts_provider(t *testing.T) { Name: "authSecret", } }, - wantErr: errors.New("failed to get secret '/authSecret': secrets \"authSecret\" not found"), + wantErr: "failed to get secret '/authSecret': secrets \"authSecret\" not found", }, { url: "https://example.com/org/repo", @@ -842,20 +1045,19 @@ func TestGitRepositoryReconciler_getAuthOpts_provider(t *testing.T) { if tt.beforeFunc != nil { tt.beforeFunc(obj) } - opts, err := r.getAuthOpts(context.TODO(), obj, *url) + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + opts, err := r.getAuthOpts(ctx, obj, *url, nil) - if tt.wantErr != nil { + if tt.wantErr != "" { g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) } else { g.Expect(err).ToNot(HaveOccurred()) g.Expect(opts).ToNot(BeNil()) - if tt.wantProviderOptsName != "" { - g.Expect(opts.ProviderOpts).ToNot(BeNil()) - g.Expect(opts.ProviderOpts.Name).To(Equal(tt.wantProviderOptsName)) - } else { - g.Expect(opts.ProviderOpts).To(BeNil()) - } + g.Expect(opts.BearerToken).To(BeEmpty()) + g.Expect(opts.Username).To(BeEmpty()) + g.Expect(opts.Password).To(BeEmpty()) } }) } @@ -964,7 +1166,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) }, beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) { obj.Status = sourcev1.GitRepositoryStatus{ - Artifact: &sourcev1.Artifact{ + Artifact: &meta.Artifact{ Revision: "staging/some-revision", Path: randStringRunes(10), }, @@ -985,7 +1187,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) { // Add existing artifact on the object and storage. obj.Status = sourcev1.GitRepositoryStatus{ - Artifact: &sourcev1.Artifact{ + Artifact: &meta.Artifact{ Revision: "staging@sha1:" + latestRev, Path: randStringRunes(10), }, @@ -1008,7 +1210,7 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) obj.Spec.Ignore = ptr.To("foo") // Add existing artifact on the object and storage. 
obj.Status = sourcev1.GitRepositoryStatus{ - Artifact: &sourcev1.Artifact{ + Artifact: &meta.Artifact{ Revision: "staging@sha1:" + latestRev, Path: randStringRunes(10), }, @@ -1139,7 +1341,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { { name: "Archiving artifact to storage with includes makes ArtifactInStorage=True", dir: "testdata/git/repository", - includes: artifactSet{&sourcev1.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"}}, + includes: artifactSet{&meta.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"}}, beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} obj.Spec.Include = []sourcev1.GitRepositoryInclude{ @@ -1159,14 +1361,14 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { { name: "Up-to-date artifact should not update status", dir: "testdata/git/repository", - includes: artifactSet{&sourcev1.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Digest: "some-checksum"}}, + includes: artifactSet{&meta.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Digest: "some-checksum"}}, beforeFunc: func(obj *sourcev1.GitRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} obj.Spec.Include = []sourcev1.GitRepositoryInclude{ {GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}}, } - obj.Status.Artifact = &sourcev1.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"} - obj.Status.IncludedArtifacts = []*sourcev1.Artifact{{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Digest: "some-checksum"}} + obj.Status.Artifact = &meta.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"} + obj.Status.IncludedArtifacts = []*meta.Artifact{{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Digest: "some-checksum"}} obj.Status.ObservedInclude = obj.Spec.Include }, want: sreconcile.ResultSuccess, @@ -1301,6 +1503,8 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { server, err := testserver.NewTempArtifactServer() g.Expect(err).NotTo(HaveOccurred()) + server.Start() + defer server.Stop() storage, err := newTestStorage(server.HTTPServer) g.Expect(err).NotTo(HaveOccurred()) defer os.RemoveAll(storage.BasePath) @@ -1385,7 +1589,7 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { }, } if d.withArtifact { - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: d.name + ".tar.gz", Revision: d.name, LastUpdateTime: metav1.Now(), @@ -1477,20 +1681,20 @@ func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *sourcev1.GitRepository, storage *Storage) error + beforeFunc func(obj *sourcev1.GitRepository, storage *storage.Storage) error want sreconcile.Result wantErr bool - assertArtifact *sourcev1.Artifact + assertArtifact *meta.Artifact assertConditions []metav1.Condition assertPaths []string }{ { name: "garbage collects", - beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { revisions := []string{"a", "b", "c", "d"} for n := range revisions { v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), Revision: v, } @@ -1508,7 +1712,7 @@ func 
TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") return nil }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/d.txt", Revision: "d", Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", @@ -1536,8 +1740,8 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "notices missing artifact in storage", - beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/invalid.txt", Revision: "e", } @@ -1555,10 +1759,10 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "notices empty artifact digest", - beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { f := "empty-digest.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -1586,10 +1790,10 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "notices artifact digest mismatch", - beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { f := "digest-mismatch.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -1617,8 +1821,8 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "updates hostname on diff from current", - beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.GitRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -1637,7 +1841,7 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) { assertPaths: []string{ "/reconcile-storage/hostname.txt", }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -2175,78 +2379,6 @@ func TestGitRepositoryReconciler_verifySignature(t *testing.T) { } } -func TestGitRepositoryReconciler_getProxyOpts(t *testing.T) { - invalidProxy := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "invalid-proxy", - Namespace: "default", - }, - Data: map[string][]byte{ - "url": []byte("https://example.com"), - }, - } - validProxy := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "valid-proxy", - Namespace: "default", - }, - Data: map[string][]byte{ - "address": []byte("https://example.com"), - "username": []byte("user"), - "password": []byte("pass"), - }, - } - - clientBuilder := fakeclient.NewClientBuilder(). - WithScheme(testEnv.GetScheme()). 
- WithObjects(invalidProxy, validProxy) - - r := &GitRepositoryReconciler{ - Client: clientBuilder.Build(), - } - - tests := []struct { - name string - secret string - err string - proxyOpts *transport.ProxyOptions - }{ - { - name: "non-existent secret", - secret: "non-existent", - err: "failed to get proxy secret 'default/non-existent': ", - }, - { - name: "invalid proxy secret", - secret: "invalid-proxy", - err: "invalid proxy secret 'default/invalid-proxy': key 'address' is missing", - }, - { - name: "valid proxy secret", - secret: "valid-proxy", - proxyOpts: &transport.ProxyOptions{ - URL: "https://example.com", - Username: "user", - Password: "pass", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - opts, err := r.getProxyOpts(context.TODO(), tt.secret, "default") - if opts != nil { - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(opts).To(Equal(tt.proxyOpts)) - } else { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.err)) - } - }) - } -} - func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { g := NewWithT(t) @@ -2669,7 +2801,7 @@ func TestGitRepositoryReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} }, commit: concreteCommit, wantEvent: "Normal NewArtifact stored artifact for commit 'test commit'", @@ -2679,12 +2811,12 @@ func TestGitRepositoryReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, commit: concreteCommit, @@ -2695,12 +2827,12 @@ func TestGitRepositoryReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Digest: "bbb"} + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, commit: concreteCommit, @@ -2711,11 +2843,11 @@ func TestGitRepositoryReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, 
"ready") }, newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, }, @@ -2724,12 +2856,12 @@ func TestGitRepositoryReconciler_notify(t *testing.T) { res: sreconcile.ResultEmpty, resErr: noopErr, oldObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, newObjBeforeFunc: func(obj *sourcev1.GitRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, commit: partialCommit, // no-op will always result in partial commit. @@ -2820,7 +2952,7 @@ func TestGitRepositoryReconciler_fetchIncludes(t *testing.T) { {name: "b", toPath: "b/", shouldExist: true}, }, wantErr: false, - wantArtifactSet: []*sourcev1.Artifact{ + wantArtifactSet: []*meta.Artifact{ {Revision: "a"}, {Revision: "b"}, }, @@ -2878,7 +3010,7 @@ func TestGitRepositoryReconciler_fetchIncludes(t *testing.T) { }, } if d.withArtifact { - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: d.name + ".tar.gz", Revision: d.name, LastUpdateTime: metav1.Now(), @@ -3036,7 +3168,7 @@ func TestGitContentConfigChanged(t *testing.T) { tests := []struct { name string obj sourcev1.GitRepository - artifacts []*sourcev1.Artifact + artifacts []*meta.Artifact want bool }{ { @@ -3073,6 +3205,38 @@ func TestGitContentConfigChanged(t *testing.T) { }, want: false, }, + { + name: "unobserved sparse checkout", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{SparseCheckout: []string{"a/b/c", "x/y/z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"a/b/c"}}, + }, + want: true, + }, + { + name: "unobserved case sensitive sparse checkout", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{SparseCheckout: []string{"a/b/c", "x/y/Z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"a/b/c", "x/y/z"}}, + }, + want: true, + }, + { + name: "observed sparse checkout", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{SparseCheckout: []string{"a/b/c", "x/y/z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"a/b/c", "x/y/z"}}, + }, + want: false, + }, + { + name: "observed sparse checkout with leading slash", + obj: sourcev1.GitRepository{ + Spec: sourcev1.GitRepositorySpec{SparseCheckout: []string{"./a/b/c", "./x/y/z"}}, + Status: sourcev1.GitRepositoryStatus{ObservedSparseCheckout: []string{"./a/b/c", "./x/y/z"}}, + }, + want: false, + }, { name: "unobserved include", obj: sourcev1.GitRepository{ @@ -3104,10 +3268,10 @@ func TestGitContentConfigChanged(t *testing.T) { ToPath: "baz", }, }, - IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Digest: "bbb"}}, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, }, }, - artifacts: []*sourcev1.Artifact{ + artifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, }, want: false, @@ -3132,10 +3296,10 @@ func 
TestGitContentConfigChanged(t *testing.T) { ToPath: "baz", }, }, - IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Digest: "bbb"}}, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, }, }, - artifacts: []*sourcev1.Artifact{ + artifacts: []*meta.Artifact{ {Revision: "ccc", Digest: "bbb"}, }, want: true, @@ -3160,10 +3324,10 @@ func TestGitContentConfigChanged(t *testing.T) { ToPath: "baz", }, }, - IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Digest: "bbb"}}, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, }, }, - artifacts: []*sourcev1.Artifact{ + artifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "ddd"}, }, want: true, @@ -3188,10 +3352,10 @@ func TestGitContentConfigChanged(t *testing.T) { ToPath: "baz", }, }, - IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Digest: "bbb"}}, + IncludedArtifacts: []*meta.Artifact{{Revision: "aaa", Digest: "bbb"}}, }, }, - artifacts: []*sourcev1.Artifact{ + artifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, }, want: true, @@ -3214,13 +3378,13 @@ func TestGitContentConfigChanged(t *testing.T) { }, }, Status: sourcev1.GitRepositoryStatus{ - IncludedArtifacts: []*sourcev1.Artifact{ + IncludedArtifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, {Revision: "ccc", Digest: "ccc"}, }, }, }, - artifacts: []*sourcev1.Artifact{ + artifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, {Revision: "ccc", Digest: "ddd"}, }, @@ -3256,13 +3420,13 @@ func TestGitContentConfigChanged(t *testing.T) { ToPath: "baz", }, }, - IncludedArtifacts: []*sourcev1.Artifact{ + IncludedArtifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, {Revision: "ccc", Digest: "ccc"}, }, }, }, - artifacts: []*sourcev1.Artifact{ + artifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, }, want: true, @@ -3297,12 +3461,12 @@ func TestGitContentConfigChanged(t *testing.T) { ToPath: "baz", }, }, - IncludedArtifacts: []*sourcev1.Artifact{ + IncludedArtifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, }, }, }, - artifacts: []*sourcev1.Artifact{ + artifacts: []*meta.Artifact{ {Revision: "aaa", Digest: "bbb"}, {Revision: "ccc", Digest: "ccc"}, }, diff --git a/internal/controller/helmchart_controller.go b/internal/controller/helmchart_controller.go index a25d287b8..e969bf67a 100644 --- a/internal/controller/helmchart_controller.go +++ b/internal/controller/helmchart_controller.go @@ -55,6 +55,7 @@ import ( eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" "github.com/fluxcd/pkg/git" "github.com/fluxcd/pkg/runtime/conditions" helper "github.com/fluxcd/pkg/runtime/controller" @@ -70,7 +71,6 @@ import ( "github.com/fluxcd/source-controller/internal/helm/chart" "github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/repository" - "github.com/fluxcd/source-controller/internal/oci" soci "github.com/fluxcd/source-controller/internal/oci" scosign "github.com/fluxcd/source-controller/internal/oci/cosign" "github.com/fluxcd/source-controller/internal/oci/notation" @@ -133,7 +133,7 @@ type HelmChartReconciler struct { helper.Metrics RegistryClientGenerator RegistryClientGeneratorFunc - Storage *Storage + Storage *storage.Storage Getters helmgetter.Providers ControllerName 
string @@ -404,7 +404,7 @@ func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, sp *patch.Se if artifactMissing { msg += ": disappeared from storage" } - rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, msg) + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) @@ -697,7 +697,7 @@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj * // v1.Artifact. // In case of a failure it records v1.FetchFailedCondition on the chart // object, and returns early. -func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj *sourcev1.HelmChart, source sourcev1.Artifact, b *chart.Build) (sreconcile.Result, error) { +func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj *sourcev1.HelmChart, source meta.Artifact, b *chart.Build) (sreconcile.Result, error) { // Create temporary working directory tmpDir, err := util.TempDirForObj("", obj) if err != nil { @@ -984,7 +984,7 @@ func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *sourcev1. } if len(delFiles) > 0 { r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) + "garbage collected %d artifacts", len(delFiles)) return nil } } @@ -1255,7 +1255,7 @@ func observeChartBuild(ctx context.Context, sp *patch.SerialPatcher, pOpts []pat if build.Complete() { conditions.Delete(obj, sourcev1.FetchFailedCondition) conditions.Delete(obj, sourcev1.BuildFailedCondition) - if build.VerifiedResult == oci.VerificationResultSuccess { + if build.VerifiedResult == soci.VerificationResultSuccess { conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of version %s", build.Version) } } diff --git a/internal/controller/helmchart_controller_test.go b/internal/controller/helmchart_controller_test.go index 6bc1e890b..190a9f8b5 100644 --- a/internal/controller/helmchart_controller_test.go +++ b/internal/controller/helmchart_controller_test.go @@ -34,6 +34,8 @@ import ( "testing" "time" + "github.com/fluxcd/pkg/artifact/config" + "github.com/fluxcd/pkg/artifact/digest" "github.com/notaryproject/notation-core-go/signature/cose" "github.com/notaryproject/notation-core-go/testhelper" "github.com/notaryproject/notation-go" @@ -50,6 +52,8 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/record" oras "oras.land/oras-go/v2/registry/remote" ctrl "sigs.k8s.io/controller-runtime" @@ -59,6 +63,7 @@ import ( kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/artifact/storage" "github.com/fluxcd/pkg/helmtestserver" "github.com/fluxcd/pkg/runtime/conditions" conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" @@ -67,7 +72,6 @@ import ( "github.com/fluxcd/pkg/testserver" sourcev1 "github.com/fluxcd/source-controller/api/v1" - sourcev1beta2 
"github.com/fluxcd/source-controller/api/v1beta2" serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/helm/chart" "github.com/fluxcd/source-controller/internal/helm/chart/secureloader" @@ -331,20 +335,20 @@ func TestHelmChartReconciler_Reconcile(t *testing.T) { func TestHelmChartReconciler_reconcileStorage(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *sourcev1.HelmChart, storage *Storage) error + beforeFunc func(obj *sourcev1.HelmChart, storage *storage.Storage) error want sreconcile.Result wantErr bool - assertArtifact *sourcev1.Artifact + assertArtifact *meta.Artifact assertConditions []metav1.Condition assertPaths []string }{ { name: "garbage collects", - beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error { + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { revisions := []string{"a", "b", "c", "d"} for n := range revisions { v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), Revision: v, } @@ -362,7 +366,7 @@ func TestHelmChartReconciler_reconcileStorage(t *testing.T) { conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") return nil }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/d.txt", Revision: "d", Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", @@ -390,8 +394,8 @@ func TestHelmChartReconciler_reconcileStorage(t *testing.T) { }, { name: "notices missing artifact in storage", - beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/invalid.txt", Revision: "d", } @@ -409,10 +413,10 @@ func TestHelmChartReconciler_reconcileStorage(t *testing.T) { }, { name: "notices empty artifact digest", - beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error { + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { f := "empty-digest.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -440,10 +444,10 @@ func TestHelmChartReconciler_reconcileStorage(t *testing.T) { }, { name: "notices artifact digest mismatch", - beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error { + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { f := "digest-mismatch.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -471,8 +475,8 @@ func TestHelmChartReconciler_reconcileStorage(t *testing.T) { }, { name: "updates hostname on diff from current", - beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.HelmChart, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -491,7 +495,7 @@ func TestHelmChartReconciler_reconcileStorage(t *testing.T) { assertPaths: []string{ "/reconcile-storage/hostname.txt", }, - assertArtifact: 
&sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -569,14 +573,22 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { tmpDir := t.TempDir() - storage, err := NewStorage(tmpDir, "example.com", retentionTTL, retentionRecords) + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: "example.com", + StorageAdvAddress: "example.com", + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) g.Expect(err).ToNot(HaveOccurred()) - gitArtifact := &sourcev1.Artifact{ + gitArtifact := &meta.Artifact{ Revision: "mock-ref/abcdefg12345678", Path: "mock.tgz", } - g.Expect(storage.Archive(gitArtifact, "testdata/charts", nil)).To(Succeed()) + g.Expect(st.Archive(gitArtifact, "testdata/charts", nil)).To(Succeed()) tests := []struct { name string @@ -639,7 +651,7 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { Name: "gitrepository", Kind: sourcev1.GitRepositoryKind, } - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: "some-path", Revision: "some-rev", } @@ -783,7 +795,7 @@ func TestHelmChartReconciler_reconcileSource(t *testing.T) { r := &HelmChartReconciler{ Client: clientBuilder.Build(), EventRecorder: record.NewFakeRecorder(32), - Storage: storage, + Storage: st, patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } @@ -917,7 +929,7 @@ func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { obj.Spec.Chart = chartName obj.Spec.Version = chartVersion - obj.Status.Artifact = &sourcev1.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + obj.Status.Artifact = &meta.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} }, want: sreconcile.ResultSuccess, assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { @@ -932,7 +944,7 @@ func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { obj.Spec.Chart = chartName obj.Spec.Version = chartVersion - obj.Status.Artifact = &sourcev1.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + obj.Status.Artifact = &meta.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} obj.Status.ObservedValuesFiles = []string{"values.yaml", "override.yaml"} }, want: sreconcile.ResultSuccess, @@ -1015,7 +1027,7 @@ func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { obj.Spec.Version = chartVersion obj.Status.ObservedGeneration = 2 - obj.Status.Artifact = &sourcev1.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + obj.Status.Artifact = &meta.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} }, want: sreconcile.ResultSuccess, assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { @@ -1036,12 +1048,12 @@ func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { } }, want: sreconcile.ResultEmpty, - wantErr: &serror.Generic{Err: errors.New("failed to get authentication secret '/invalid'")}, + wantErr: &serror.Generic{Err: errors.New("failed to get authentication secret '/invalid': secrets \"invalid\" not found")}, assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Complete()).To(BeFalse()) 
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get authentication secret '/invalid'"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get authentication secret '/invalid': secrets \"invalid\" not found"), })) }, }, @@ -1113,14 +1125,14 @@ func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { clientBuilder.WithObjects(tt.secret.DeepCopy()) } - storage, err := newTestStorage(server) + testStorage, err := newTestStorage(server) g.Expect(err).ToNot(HaveOccurred()) r := &HelmChartReconciler{ Client: clientBuilder.Build(), EventRecorder: record.NewFakeRecorder(32), Getters: testGetters, - Storage: storage, + Storage: testStorage, patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } @@ -1133,7 +1145,7 @@ func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { Timeout: &metav1.Duration{Duration: timeout}, }, Status: sourcev1.HelmRepositoryStatus{ - Artifact: &sourcev1.Artifact{ + Artifact: &meta.Artifact{ Path: "index.yaml", }, }, @@ -1186,14 +1198,22 @@ func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { metadata, err := loadTestChartToOCI(chartData, testRegistryServer, "", "", "") g.Expect(err).NotTo(HaveOccurred()) - storage, err := NewStorage(tmpDir, "example.com", retentionTTL, retentionRecords) + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: "example.com", + StorageAdvAddress: "example.com", + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) g.Expect(err).ToNot(HaveOccurred()) - cachedArtifact := &sourcev1.Artifact{ + cachedArtifact := &meta.Artifact{ Revision: "0.1.0", Path: metadata.Name + "-" + metadata.Version + ".tgz", } - g.Expect(storage.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) tests := []struct { name string @@ -1265,13 +1285,13 @@ func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { obj.Spec.Chart = metadata.Name obj.Spec.Version = metadata.Version - obj.Status.Artifact = &sourcev1.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} }, want: sreconcile.ResultSuccess, assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Name).To(Equal(metadata.Name)) g.Expect(build.Version).To(Equal(metadata.Version)) - g.Expect(build.Path).To(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) g.Expect(build.Path).To(BeARegularFile()) g.Expect(build.ValuesFiles).To(BeEmpty()) }, @@ -1284,13 +1304,13 @@ func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { obj.Spec.Version = metadata.Version obj.Status.ObservedGeneration = 2 - obj.Status.Artifact = &sourcev1.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} }, want: sreconcile.ResultSuccess, assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { 
g.Expect(build.Name).To(Equal(metadata.Name)) g.Expect(build.Version).To(Equal(metadata.Version)) - g.Expect(build.Path).ToNot(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).ToNot(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) g.Expect(build.Path).To(BeARegularFile()) }, cleanFunc: func(g *WithT, build *chart.Build) { @@ -1305,12 +1325,12 @@ func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { } }, want: sreconcile.ResultEmpty, - wantErr: &serror.Generic{Err: errors.New("failed to get authentication secret '/invalid'")}, + wantErr: &serror.Generic{Err: errors.New("failed to get authentication secret '/invalid': secrets \"invalid\" not found")}, assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { g.Expect(build.Complete()).To(BeFalse()) g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get authentication secret '/invalid'"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get authentication secret '/invalid': secrets \"invalid\" not found"), })) }, }, @@ -1354,7 +1374,7 @@ func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { Client: clientBuilder.Build(), EventRecorder: record.NewFakeRecorder(32), Getters: testGetters, - Storage: storage, + Storage: st, RegistryClientGenerator: registry.ClientGenerator, patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } @@ -1366,7 +1386,7 @@ func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) { Spec: sourcev1.HelmRepositorySpec{ URL: fmt.Sprintf("oci://%s/testrepo", testRegistryServer.registryHost), Timeout: &metav1.Duration{Duration: timeout}, - Provider: sourcev1beta2.GenericOCIProvider, + Provider: sourcev1.GenericOCIProvider, Type: sourcev1.HelmRepositoryTypeOCI, Insecure: true, }, @@ -1409,28 +1429,36 @@ func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { tmpDir := t.TempDir() - storage, err := NewStorage(tmpDir, "example.com", retentionTTL, retentionRecords) + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: "example.com", + StorageAdvAddress: "example.com", + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) g.Expect(err).ToNot(HaveOccurred()) - chartsArtifact := &sourcev1.Artifact{ + chartsArtifact := &meta.Artifact{ Revision: "mock-ref/abcdefg12345678", Path: "mock.tgz", } - g.Expect(storage.Archive(chartsArtifact, "testdata/charts", nil)).To(Succeed()) - yamlArtifact := &sourcev1.Artifact{ + g.Expect(st.Archive(chartsArtifact, "testdata/charts", nil)).To(Succeed()) + yamlArtifact := &meta.Artifact{ Revision: "9876abcd", Path: "values.yaml", } - g.Expect(storage.CopyFromPath(yamlArtifact, "testdata/charts/helmchart/values.yaml")).To(Succeed()) - cachedArtifact := &sourcev1.Artifact{ + g.Expect(st.CopyFromPath(yamlArtifact, "testdata/charts/helmchart/values.yaml")).To(Succeed()) + cachedArtifact := &meta.Artifact{ Revision: "0.1.0", Path: "cached.tgz", } - g.Expect(storage.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) tests := []struct { name string - source sourcev1.Artifact + source meta.Artifact beforeFunc func(obj *sourcev1.HelmChart) want 
sreconcile.Result wantErr error @@ -1516,7 +1544,7 @@ func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { assertFunc: func(g *WithT, build chart.Build) { g.Expect(build.Name).To(Equal("helmchart")) g.Expect(build.Version).To(Equal("0.1.0")) - g.Expect(build.Path).To(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) g.Expect(build.Path).To(BeARegularFile()) g.Expect(build.ValuesFiles).To(BeEmpty()) }, @@ -1533,7 +1561,7 @@ func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { assertFunc: func(g *WithT, build chart.Build) { g.Expect(build.Name).To(Equal("helmchart")) g.Expect(build.Version).To(Equal("0.1.0")) - g.Expect(build.Path).To(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) g.Expect(build.Path).To(BeARegularFile()) g.Expect(build.ValuesFiles).To(Equal([]string{"values.yaml", "override.yaml"})) }, @@ -1551,7 +1579,7 @@ func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { assertFunc: func(g *WithT, build chart.Build) { g.Expect(build.Name).To(Equal("helmchart")) g.Expect(build.Version).To(Equal("0.1.0")) - g.Expect(build.Path).ToNot(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).ToNot(Equal(st.LocalPath(*cachedArtifact.DeepCopy()))) g.Expect(build.Path).To(BeARegularFile()) g.Expect(build.ValuesFiles).To(BeEmpty()) }, @@ -1561,7 +1589,7 @@ func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { }, { name: "Empty source artifact", - source: sourcev1.Artifact{}, + source: meta.Artifact{}, want: sreconcile.ResultEmpty, wantErr: &serror.Generic{Err: errors.New("no such file or directory")}, assertFunc: func(g *WithT, build chart.Build) { @@ -1588,7 +1616,7 @@ func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { WithStatusSubresource(&sourcev1.HelmChart{}). 
Build(), EventRecorder: record.NewFakeRecorder(32), - Storage: storage, + Storage: st, Getters: testGetters, RegistryClientGenerator: registry.ClientGenerator, patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), @@ -1676,7 +1704,7 @@ func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"), }, beforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: "testdata/charts/helmchart-0.1.0.tgz", } }, @@ -1698,7 +1726,7 @@ func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { }, beforeFunc: func(obj *sourcev1.HelmChart) { obj.Status.ObservedChartName = "helmchart" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: "0.1.0", Path: "testdata/charts/helmchart-0.1.0.tgz", } @@ -1967,7 +1995,18 @@ func TestHelmChartReconciler_getSource(t *testing.T) { return } - g.Expect(got).To(Equal(tt.want)) + // TODO(stefan): Remove this workaround when the controller-runtime fake client restores TypeMeta + // https://github.com/kubernetes-sigs/controller-runtime/issues/3302 + unstructuredGot, err := runtime.DefaultUnstructuredConverter.ToUnstructured(got) + g.Expect(err).ToNot(HaveOccurred()) + gotName, _, err := unstructured.NestedFieldCopy(unstructuredGot, "metadata", "name") + g.Expect(err).ToNot(HaveOccurred()) + unstructuredWant, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tt.want) + g.Expect(err).ToNot(HaveOccurred()) + wantName, _, err := unstructured.NestedFieldCopy(unstructuredWant, "metadata", "name") + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(gotName).To(Equal(wantName)) g.Expect(err).ToNot(HaveOccurred()) }) } @@ -2285,7 +2324,7 @@ func TestHelmChartReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, newObjBeforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} }, wantEvent: "Normal ChartPackageSucceeded packaged", }, @@ -2294,12 +2333,12 @@ func TestHelmChartReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, newObjBeforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal ChartPackageSucceeded packaged", @@ -2309,12 +2348,12 @@ func TestHelmChartReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, newObjBeforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = 
&sourcev1.Artifact{Revision: "aaa", Digest: "bbb"} + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal ChartPackageSucceeded packaged", @@ -2324,11 +2363,11 @@ func TestHelmChartReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, newObjBeforeFunc: func(obj *sourcev1.HelmChart) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, }, @@ -2516,7 +2555,7 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_authStrategy(t *testing.T) { }, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "unknown build error: failed to construct Helm client's TLS config: cannot append certificate into certificate pool: invalid CA certificate"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "unknown build error: failed to construct Helm client's TLS config: failed to parse CA certificate"), }, }, { @@ -2595,7 +2634,7 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_authStrategy(t *testing.T) { Interval: metav1.Duration{Duration: interval}, Timeout: &metav1.Duration{Duration: timeout}, Type: sourcev1.HelmRepositoryTypeOCI, - Provider: sourcev1beta2.GenericOCIProvider, + Provider: sourcev1.GenericOCIProvider, URL: fmt.Sprintf("oci://%s/testrepo", server.registryHost), Insecure: tt.insecure, }, @@ -2798,7 +2837,7 @@ func TestHelmChartRepository_reconcileSource_verifyOCISourceSignature_keyless(t Spec: sourcev1.HelmRepositorySpec{ URL: "oci://ghcr.io/stefanprodan/charts", Timeout: &metav1.Duration{Duration: timeout}, - Provider: sourcev1beta2.GenericOCIProvider, + Provider: sourcev1.GenericOCIProvider, Type: sourcev1.HelmRepositoryTypeOCI, }, } @@ -2885,19 +2924,26 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureNotation(t *t metadata, err := loadTestChartToOCI(chartData, server, "", "", "") g.Expect(err).NotTo(HaveOccurred()) - storage, err := NewStorage(tmpDir, server.registryHost, retentionTTL, retentionRecords) + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: server.registryHost, + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) g.Expect(err).ToNot(HaveOccurred()) - cachedArtifact := &sourcev1.Artifact{ + cachedArtifact := &meta.Artifact{ Revision: "0.1.0", Path: metadata.Name + "-" + metadata.Version + ".tgz", } - g.Expect(storage.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) certTuple := testhelper.GetRSASelfSignedSigningCertTuple("notation self-signed certs for testing") certs := []*x509.Certificate{certTuple.Cert} - signer, err := signer.New(certTuple.PrivateKey, certs) + sg, err := signer.New(certTuple.PrivateKey, certs) g.Expect(err).ToNot(HaveOccurred()) policyDocument := trustpolicy.Document{ @@ -2993,7 +3039,7 @@ func 
TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureNotation(t *t obj.Spec.Version = metadata.Version obj.Spec.Verify = nil conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") - obj.Status.Artifact = &sourcev1.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ @@ -3040,8 +3086,8 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureNotation(t *t wantErrMsg: fmt.Sprintf("failed to verify the signature using provider 'notation': '%s' not found in secret '/notation-config'", snotation.DefaultTrustPolicyKey), want: sreconcile.ResultEmpty, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", fmt.Sprintf("failed to verify the signature using provider 'notation': '%s' not found in secret '/notation-config'", snotation.DefaultTrustPolicyKey)), - *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, fmt.Sprintf("failed to verify the signature using provider 'notation': '%s' not found in secret '/notation-config'", snotation.DefaultTrustPolicyKey)), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "Unknown", "failed to verify the signature using provider 'notation': '%s' not found in secret '/notation-config'", snotation.DefaultTrustPolicyKey), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider 'notation': '%s' not found in secret '/notation-config'", snotation.DefaultTrustPolicyKey), }, }, } @@ -3059,7 +3105,7 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureNotation(t *t Spec: sourcev1.HelmRepositorySpec{ URL: fmt.Sprintf("oci://%s/testrepo", server.registryHost), Timeout: &metav1.Duration{Duration: timeout}, - Provider: sourcev1beta2.GenericOCIProvider, + Provider: sourcev1.GenericOCIProvider, Type: sourcev1.HelmRepositoryTypeOCI, Insecure: true, }, @@ -3107,7 +3153,7 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureNotation(t *t Client: clientBuilder.Build(), EventRecorder: record.NewFakeRecorder(32), Getters: testGetters, - Storage: storage, + Storage: st, RegistryClientGenerator: registry.ClientGenerator, patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } @@ -3149,7 +3195,7 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureNotation(t *t ArtifactReference: artifact, } - _, err = notation.Sign(ctx, signer, repo, signOptions) + _, err = notation.Sign(ctx, sg, repo, signOptions) g.Expect(err).ToNot(HaveOccurred()) } @@ -3209,14 +3255,21 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureCosign(t *tes metadata, err := loadTestChartToOCI(chartData, server, "", "", "") g.Expect(err).NotTo(HaveOccurred()) - storage, err := NewStorage(tmpDir, server.registryHost, retentionTTL, retentionRecords) + opts := &config.Options{ + StoragePath: tmpDir, + StorageAddress: server.registryHost, + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) g.Expect(err).ToNot(HaveOccurred()) - cachedArtifact := &sourcev1.Artifact{ + cachedArtifact := &meta.Artifact{ Revision: "0.1.0", Path: metadata.Name + "-" + metadata.Version + ".tgz", } - g.Expect(storage.CopyFromPath(cachedArtifact, 
"testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + g.Expect(st.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) pf := func(b bool) ([]byte, error) { return []byte("cosign-password"), nil @@ -3305,7 +3358,7 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureCosign(t *tes obj.Spec.Version = metadata.Version obj.Spec.Verify = nil conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") - obj.Status.Artifact = &sourcev1.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} + obj.Status.Artifact = &meta.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"} }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ @@ -3332,7 +3385,7 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureCosign(t *tes Spec: sourcev1.HelmRepositorySpec{ URL: fmt.Sprintf("oci://%s/testrepo", server.registryHost), Timeout: &metav1.Duration{Duration: timeout}, - Provider: sourcev1beta2.GenericOCIProvider, + Provider: sourcev1.GenericOCIProvider, Type: sourcev1.HelmRepositoryTypeOCI, Insecure: true, }, @@ -3352,7 +3405,7 @@ func TestHelmChartReconciler_reconcileSourceFromOCI_verifySignatureCosign(t *tes Client: clientBuilder.Build(), EventRecorder: record.NewFakeRecorder(32), Getters: testGetters, - Storage: storage, + Storage: st, RegistryClientGenerator: registry.ClientGenerator, patchOptions: getPatchOptions(helmChartReadyCondition.Owned, "sc"), } diff --git a/internal/controller/helmrepository_controller.go b/internal/controller/helmrepository_controller.go index 2fb9a1ba2..06c4494cf 100644 --- a/internal/controller/helmrepository_controller.go +++ b/internal/controller/helmrepository_controller.go @@ -42,6 +42,8 @@ import ( eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" "github.com/fluxcd/pkg/runtime/conditions" helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/jitter" @@ -51,7 +53,6 @@ import ( sourcev1 "github.com/fluxcd/source-controller/api/v1" "github.com/fluxcd/source-controller/internal/cache" - intdigest "github.com/fluxcd/source-controller/internal/digest" serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/repository" @@ -109,7 +110,7 @@ type HelmRepositoryReconciler struct { helper.Metrics Getters helmgetter.Providers - Storage *Storage + Storage *storage.Storage ControllerName string Cache *cache.Cache @@ -127,7 +128,7 @@ type HelmRepositoryReconcilerOptions struct { // v1.HelmRepository (sub)reconcile functions. The type implementations // are grouped and executed serially to perform the complete reconcile of the // object. 
-type helmRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) +type helmRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) func (r *HelmRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{}) @@ -257,7 +258,7 @@ func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seri } var chartRepo repository.ChartRepository - var artifact sourcev1.Artifact + var artifact meta.Artifact // Run the sub-reconcilers and build the result of reconciliation. var res sreconcile.Result @@ -329,7 +330,7 @@ func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *s // The hostname of any URL in the Status of the object are updated, to ensure // they match the Storage server hostname of current runtime. func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, - obj *sourcev1.HelmRepository, _ *sourcev1.Artifact, _ *repository.ChartRepository) (sreconcile.Result, error) { + obj *sourcev1.HelmRepository, _ *meta.Artifact, _ *repository.ChartRepository) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -367,7 +368,7 @@ func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, sp *pat if artifactMissing { msg += ": disappeared from storage" } - rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, msg) + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) @@ -392,7 +393,7 @@ func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, sp *pat // v1.FetchFailedCondition is removed, and the repository.ChartRepository // pointer is set to the newly fetched index. func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, - obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { + obj *sourcev1.HelmRepository, artifact *meta.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { // Ensure it's not an OCI URL. API validation ensures that only // http/https/oci scheme are allowed. if strings.HasPrefix(obj.Spec.URL, helmreg.OCIScheme) { @@ -529,7 +530,7 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc // early. // On a successful archive, the Artifact in the Status of the object is set, // and the symlink in the Storage is updated to its path. -func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { +func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { // Set the ArtifactInStorageCondition if there's no drift. 
defer func() { if obj.GetArtifact().HasRevision(artifact.Revision) { @@ -677,7 +678,7 @@ func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sour } if len(delFiles) > 0 { r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) + "garbage collected %d artifacts", len(delFiles)) return nil } } diff --git a/internal/controller/helmrepository_controller_test.go b/internal/controller/helmrepository_controller_test.go index 9724baf65..d76c58a42 100644 --- a/internal/controller/helmrepository_controller_test.go +++ b/internal/controller/helmrepository_controller_test.go @@ -18,7 +18,6 @@ package controller import ( "context" - "crypto/tls" "encoding/json" "errors" "fmt" @@ -44,20 +43,20 @@ import ( kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" "github.com/fluxcd/pkg/helmtestserver" "github.com/fluxcd/pkg/runtime/conditions" conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check" "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/runtime/secrets" sourcev1 "github.com/fluxcd/source-controller/api/v1" "github.com/fluxcd/source-controller/internal/cache" - intdigest "github.com/fluxcd/source-controller/internal/digest" - "github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/repository" intpredicates "github.com/fluxcd/source-controller/internal/predicates" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/internal/reconcile/summarize" - stls "github.com/fluxcd/source-controller/internal/tls" ) func TestHelmRepositoryReconciler_deleteBeforeFinalizer(t *testing.T) { @@ -174,20 +173,20 @@ func TestHelmRepositoryReconciler_Reconcile(t *testing.T) { func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *sourcev1.HelmRepository, storage *Storage) error + beforeFunc func(obj *sourcev1.HelmRepository, storage *storage.Storage) error want sreconcile.Result wantErr bool - assertArtifact *sourcev1.Artifact + assertArtifact *meta.Artifact assertConditions []metav1.Condition assertPaths []string }{ { name: "garbage collects", - beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { revisions := []string{"a", "b", "c", "d"} for n := range revisions { v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), Revision: v, } @@ -205,7 +204,7 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") return nil }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/d.txt", Revision: "d", Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", @@ -233,8 +232,8 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "notices missing artifact in storage", - beforeFunc: func(obj 
*sourcev1.HelmRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/invalid.txt", Revision: "d", } @@ -252,10 +251,10 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "notices empty artifact digest", - beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { f := "empty-digest.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -283,10 +282,10 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "notices artifact digest mismatch", - beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { f := "digest-mismatch.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/reconcile-storage/%s.txt", f), Revision: "fake", } @@ -314,8 +313,8 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { }, { name: "updates hostname on diff from current", - beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.HelmRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -334,7 +333,7 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { assertPaths: []string{ "/reconcile-storage/hostname.txt", }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -376,7 +375,7 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { }() var chartRepo repository.ChartRepository - var artifact sourcev1.Artifact + var artifact meta.Artifact sp := patch.NewSerialPatcher(obj, r.Client) got, err := r.reconcileStorage(context.TODO(), sp, obj, &artifact, &chartRepo) @@ -420,26 +419,29 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { server options url string secret *corev1.Secret - beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) - afterFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) + beforeFunc func(t *WithT, obj *sourcev1.HelmRepository) + revFunc func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest + afterFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) want sreconcile.Result wantErr bool assertConditions []metav1.Condition }{ { - name: "HTTPS with certSecretRef pointing to CA cert but public repo URL succeeds", + name: "HTTPS with certSecretRef non-matching CA succeeds via system CA pool", protocol: "http", url: "https://stefanprodan.github.io/podinfo", want: sreconcile.ResultSuccess, + wantErr: false, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "ca-file", + Name: "ca-file", + Namespace: "default", }, Data: map[string][]byte{ "ca.crt": tlsCA, }, }, 
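The test table being rewritten here introduces per-case revFunc helpers (visible just below) that derive the expected index revision through the fluxcd/pkg/runtime/secrets helpers; each test Secret also gains an explicit Namespace so it resolves like a real namespaced object. A condensed sketch of that pattern, reusing the identifiers and fixtures of helmrepository_controller_test.go (testGetters, repository, helmgetter, intdigest); the function name expectedRevision is illustrative only:

```go
// expectedRevision condenses the revFunc pattern added to these tests:
// build the chart repository client from the Secret's credentials, cache
// the index, and digest it with the canonical algorithm.
func expectedRevision(ctx context.Context, serverURL string, secret *corev1.Secret) (digest.Digest, error) {
	repoURL, err := repository.NormalizeURL(serverURL)
	if err != nil {
		return "", err
	}

	// TLS material (ca.crt) is read via the runtime/secrets helper instead of
	// the removed internal/tls package. For the basic-auth cases the tests use
	// secrets.BasicAuthFromSecret and helmgetter.WithBasicAuth instead.
	tlsConfig, err := secrets.TLSConfigFromSecret(ctx, secret)
	if err != nil {
		return "", err
	}

	chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, tlsConfig,
		helmgetter.WithURL(repoURL))
	if err != nil {
		return "", err
	}
	if err := chartRepo.CacheIndex(); err != nil {
		return "", err
	}
	return chartRepo.Digest(intdigest.Canonical), nil
}
```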
- beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.CertSecretRef = &meta.LocalObjectReference{Name: "ca-file"} }, assertConditions: []metav1.Condition{ @@ -457,21 +459,43 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "ca-file", + Name: "ca-file", + Namespace: "default", }, Data: map[string][]byte{ "ca.crt": tlsCA, }, }, - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.CertSecretRef = &meta.LocalObjectReference{Name: "ca-file"} }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + tlsConfig, err := secrets.TLSConfigFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, tlsConfig, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).ToNot(BeNil()) t.Expect(artifact.Revision).ToNot(BeEmpty()) @@ -487,21 +511,43 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "ca-file", + Name: "ca-file", + Namespace: "default", }, Data: map[string][]byte{ "caFile": tlsCA, }, }, - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + tlsConfig, err := secrets.TLSConfigFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, tlsConfig, getterOpts...) 
+ t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).ToNot(BeNil()) t.Expect(artifact.Revision).ToNot(BeEmpty()) @@ -518,22 +564,44 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "ca-file", + Name: "ca-file", + Namespace: "default", }, Data: map[string][]byte{ "caFile": tlsCA, }, Type: corev1.SecretTypeDockerConfigJson, }, - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + tlsConfig, err := secrets.TLSConfigFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, tlsConfig, getterOpts...) + t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).ToNot(BeNil()) t.Expect(artifact.Revision).ToNot(BeEmpty()) @@ -542,12 +610,30 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { { name: "HTTP without secretRef makes ArtifactOutdated=True", protocol: "http", - want: sreconcile.ResultSuccess, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) 
+ t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).ToNot(BeNil()) t.Expect(artifact.Revision).ToNot(BeEmpty()) @@ -562,22 +648,45 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "basic-auth", + Name: "basic-auth", + Namespace: "default", }, Data: map[string][]byte{ "username": []byte("git"), "password": []byte("1234"), }, }, - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + basicAuth, err := secrets.BasicAuthFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + helmgetter.WithBasicAuth(basicAuth.Username, basicAuth.Password), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) 
+ t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).ToNot(BeNil()) t.Expect(artifact.Revision).ToNot(BeEmpty()) @@ -593,7 +702,8 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "basic-auth", + Name: "basic-auth", + Namespace: "default", }, Data: map[string][]byte{ "username": []byte("git"), @@ -601,15 +711,37 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, Type: corev1.SecretTypeDockerConfigJson, }, - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + basicAuth, err := secrets.BasicAuthFromSecret(context.TODO(), secret) + t.Expect(err).ToNot(HaveOccurred()) + + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + helmgetter.WithBasicAuth(basicAuth.Username, basicAuth.Password), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) 
+ t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).ToNot(BeNil()) t.Expect(artifact.Revision).ToNot(BeEmpty()) @@ -625,24 +757,25 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }, secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "invalid-ca", + Name: "invalid-ca", + Namespace: "default", }, Data: map[string][]byte{ "ca.crt": []byte("invalid"), }, }, - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.CertSecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "cannot append certificate into certificate pool: invalid CA certificate"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to construct Helm client's TLS config: failed to parse CA certificate"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { // No repo index due to fetch fail. t.Expect(chartRepo.Path).To(BeEmpty()) t.Expect(chartRepo.Index).To(BeNil()) @@ -652,7 +785,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { { name: "Invalid URL makes FetchFailed=True and returns stalling error", protocol: "http", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "") conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") @@ -664,7 +797,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { // No repo index due to fetch fail. 
t.Expect(chartRepo.Path).To(BeEmpty()) t.Expect(chartRepo.Index).To(BeNil()) @@ -674,7 +807,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { { name: "Unsupported scheme makes FetchFailed=True and returns stalling error", protocol: "http", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "ftp://") conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") @@ -686,7 +819,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { // No repo index due to fetch fail. t.Expect(chartRepo.Path).To(BeEmpty()) t.Expect(chartRepo.Index).To(BeNil()) @@ -696,7 +829,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { { name: "Missing secret returns FetchFailed=True and returns error", protocol: "http", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"} conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") @@ -707,7 +840,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { // No repo index due to fetch fail. 
t.Expect(chartRepo.Path).To(BeEmpty()) t.Expect(chartRepo.Index).To(BeNil()) @@ -719,24 +852,25 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { protocol: "http", secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "malformed-basic-auth", + Name: "malformed-basic-auth", + Namespace: "default", }, Data: map[string][]byte{ "username": []byte("git"), }, }, - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "malformed-basic-auth"} conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "required fields 'username' and 'password"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "secret 'default/malformed-basic-auth': malformed basic auth - has 'username' but missing 'password'"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { // No repo index due to fetch fail. t.Expect(chartRepo.Path).To(BeEmpty()) t.Expect(chartRepo.Index).To(BeNil()) @@ -746,20 +880,34 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { { name: "Stored index with same revision", protocol: "http", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) { - obj.Status.Artifact = &sourcev1.Artifact{ - Revision: rev.String(), - } - + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "foo", "bar") }, + revFunc: func(t *WithT, server *helmtestserver.HelmServer, secret *corev1.Secret) digest.Digest { + serverURL := server.URL() + repoURL, err := repository.NormalizeURL(serverURL) + t.Expect(err).ToNot(HaveOccurred()) + + getterOpts := []helmgetter.Option{ + helmgetter.WithURL(repoURL), + } + + chartRepo, err := repository.NewChartRepository(repoURL, "", testGetters, nil, getterOpts...) 
+ t.Expect(err).ToNot(HaveOccurred()) + + err = chartRepo.CacheIndex() + t.Expect(err).ToNot(HaveOccurred()) + + digest := chartRepo.Digest(intdigest.Canonical) + return digest + }, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).To(BeNil()) @@ -770,8 +918,8 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { { name: "Stored index with different revision", protocol: "http", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{ Revision: "80bb3dd67c63095d985850459834ea727603727a370079de90d221191d375a86", } conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -783,7 +931,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"), }, - afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) { + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, chartRepo *repository.ChartRepository) { t.Expect(chartRepo.Path).ToNot(BeEmpty()) t.Expect(chartRepo.Index).ToNot(BeNil()) @@ -795,8 +943,8 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { { name: "Existing artifact makes ArtifactOutdated=True", protocol: "http", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, rev digest.Digest) { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Status.Artifact = &meta.Artifact{ Path: "some-path", Revision: "some-rev", } @@ -815,6 +963,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ GenerateName: "auth-strategy-", Generation: 1, + Namespace: "default", }, Spec: sourcev1.HelmRepositorySpec{ Interval: metav1.Duration{Duration: interval}, @@ -873,48 +1022,9 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { clientBuilder.WithObjects(secret.DeepCopy()) } - // Calculate the artifact digest for valid repos configurations. - getterOpts := []helmgetter.Option{ - helmgetter.WithURL(server.URL()), - } - var newChartRepo *repository.ChartRepository - var tlsConf *tls.Config - validSecret := true - if secret != nil { - // Extract the client options from secret, ignoring any invalid - // value. validSecret is used to determine if the index digest - // should be calculated below. - var gOpts []helmgetter.Option - var serr error - gOpts, serr = getter.GetterOptionsFromSecret(*secret) - if serr != nil { - validSecret = false - } - getterOpts = append(getterOpts, gOpts...) 
- repoURL := server.URL() - if tt.url != "" { - repoURL = tt.url - } - tlsConf, _, serr = stls.KubeTLSClientConfigFromSecret(*secret, repoURL) - if serr != nil { - validSecret = false - } - if tlsConf == nil { - tlsConf, _, serr = stls.TLSClientConfigFromSecret(*secret, repoURL) - if serr != nil { - validSecret = false - } - } - newChartRepo, err = repository.NewChartRepository(obj.Spec.URL, "", testGetters, tlsConf, getterOpts...) - } else { - newChartRepo, err = repository.NewChartRepository(obj.Spec.URL, "", testGetters, nil) - } - g.Expect(err).ToNot(HaveOccurred()) - var rev digest.Digest - if validSecret { - g.Expect(newChartRepo.CacheIndex()).To(Succeed()) - rev = newChartRepo.Digest(intdigest.Canonical) + if tt.revFunc != nil { + rev = tt.revFunc(g, server, secret) } r := &HelmRepositoryReconciler{ @@ -925,7 +1035,14 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"), } if tt.beforeFunc != nil { - tt.beforeFunc(g, obj, rev) + tt.beforeFunc(g, obj) + } + + // Special handling for tests that need to set revision after calculation + if tt.name == "Stored index with same revision" && rev != "" { + obj.Status.Artifact = &meta.Artifact{ + Revision: rev.String(), + } } g.Expect(r.Client.Create(context.TODO(), obj)).ToNot(HaveOccurred()) @@ -934,7 +1051,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { }() var chartRepo repository.ChartRepository - var artifact sourcev1.Artifact + var artifact meta.Artifact sp := patch.NewSerialPatcher(obj, r.Client) got, err := r.reconcileSource(context.TODO(), sp, obj, &artifact, &chartRepo) @@ -959,7 +1076,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { tests := []struct { name string cache *cache.Cache - beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) + beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) afterFunc func(t *WithT, obj *sourcev1.HelmRepository, cache *cache.Cache) want sreconcile.Result wantErr bool @@ -967,7 +1084,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { }{ { name: "Archiving artifact to storage makes ArtifactInStorage=True and artifact is stored as JSON", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} }, want: sreconcile.ResultSuccess, @@ -984,7 +1101,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { { name: "Archiving (loaded) artifact to storage adds to cache", cache: cache.New(10, time.Minute), - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { index.Index = &repo.IndexFile{ APIVersion: "v1", Generated: time.Now(), @@ -1003,7 +1120,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { }, { name: "Up-to-date artifact should not update status", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index 
*repository.ChartRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} obj.Status.Artifact = artifact.DeepCopy() }, @@ -1017,7 +1134,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { }, { name: "Removes ArtifactOutdatedCondition after creating a new artifact", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") }, @@ -1028,7 +1145,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { }, { name: "Creates latest symlink to the created artifact", - beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact meta.Artifact, index *repository.ChartRepository) { obj.Spec.Interval = metav1.Duration{Duration: interval} }, afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, _ *cache.Cache) { @@ -1110,7 +1227,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) { // Helper to build simple helmRepositoryReconcileFunc with result and error. buildReconcileFuncs := func(r sreconcile.Result, e error) helmRepositoryReconcileFunc { - return func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + return func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { return r, e } } @@ -1165,11 +1282,11 @@ func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) { { name: "multiple object status conditions mutations", reconcileFuncs: []helmRepositoryReconcileFunc{ - func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") return sreconcile.ResultSuccess, nil }, - func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *meta.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { conditions.MarkTrue(obj, meta.ReconcilingCondition, meta.ProgressingReason, "creating artifact") return sreconcile.ResultSuccess, nil }, @@ -1364,7 +1481,7 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: nil} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: nil} }, wantEvent: "Normal NewArtifact stored fetched index of unknown size", }, @@ -1373,7 +1490,7 @@ func TestHelmRepositoryReconciler_notify(t 
*testing.T) { res: sreconcile.ResultSuccess, resErr: nil, newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} }, wantEvent: "Normal NewArtifact stored fetched index of size", }, @@ -1382,12 +1499,12 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal Succeeded stored fetched index of size", @@ -1397,12 +1514,12 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Digest: "bbb", Size: &aSize} + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb", Size: &aSize} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal NewArtifact stored fetched index of size", @@ -1412,11 +1529,11 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) { res: sreconcile.ResultSuccess, resErr: nil, oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, newObjBeforeFunc: func(obj *sourcev1.HelmRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, }, diff --git a/internal/controller/ocirepository_controller.go b/internal/controller/ocirepository_controller.go index 6f2c0737c..a91c8a51b 100644 --- a/internal/controller/ocirepository_controller.go +++ b/internal/controller/ocirepository_controller.go @@ -43,7 +43,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" kuberecorder "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "k8s.io/utils/ptr" @@ -51,6 +50,9 @@ import ( eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1" "github.com/fluxcd/pkg/apis/meta" + 
"github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" + "github.com/fluxcd/pkg/cache" "github.com/fluxcd/pkg/oci" "github.com/fluxcd/pkg/runtime/conditions" helper "github.com/fluxcd/pkg/runtime/controller" @@ -58,6 +60,7 @@ import ( "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" rreconcile "github.com/fluxcd/pkg/runtime/reconcile" + "github.com/fluxcd/pkg/runtime/secrets" "github.com/fluxcd/pkg/sourceignore" "github.com/fluxcd/pkg/tar" "github.com/fluxcd/pkg/version" @@ -69,19 +72,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" sourcev1 "github.com/fluxcd/source-controller/api/v1" - ociv1 "github.com/fluxcd/source-controller/api/v1beta2" serror "github.com/fluxcd/source-controller/internal/error" soci "github.com/fluxcd/source-controller/internal/oci" scosign "github.com/fluxcd/source-controller/internal/oci/cosign" "github.com/fluxcd/source-controller/internal/oci/notation" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/internal/reconcile/summarize" - "github.com/fluxcd/source-controller/internal/tls" "github.com/fluxcd/source-controller/internal/util" ) // ociRepositoryReadyCondition contains the information required to summarize a -// v1beta2.OCIRepository Ready Condition. +// v1.OCIRepository Ready Condition. var ociRepositoryReadyCondition = summarize.Conditions{ Target: meta.ReadyCondition, Owned: []string{ @@ -128,19 +129,20 @@ func (e invalidOCIURLError) Error() string { return e.err.Error() } -// ociRepositoryReconcileFunc is the function type for all the v1beta2.OCIRepository +// ociRepositoryReconcileFunc is the function type for all the v1.OCIRepository // (sub)reconcile functions. The type implementations are grouped and // executed serially to perform the complete reconcile of the object. -type ociRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *ociv1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) +type ociRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.OCIRepository, metadata *meta.Artifact, dir string) (sreconcile.Result, error) -// OCIRepositoryReconciler reconciles a v1beta2.OCIRepository object +// OCIRepositoryReconciler reconciles a v1.OCIRepository object type OCIRepositoryReconciler struct { client.Client helper.Metrics kuberecorder.EventRecorder - Storage *Storage + Storage *storage.Storage ControllerName string + TokenCache *cache.TokenCache requeueDependency time.Duration patchOptions []patch.Option @@ -162,7 +164,7 @@ func (r *OCIRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, o r.requeueDependency = opts.DependencyRequeueInterval return ctrl.NewControllerManagedBy(mgr). - For(&ociv1.OCIRepository{}, builder.WithPredicates( + For(&sourcev1.OCIRepository{}, builder.WithPredicates( predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}), )). 
WithOptions(controller.Options{ @@ -175,13 +177,14 @@ func (r *OCIRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, o // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories/finalizers,verbs=get;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=events,verbs=create;patch +// +kubebuilder:rbac:groups="",resources=serviceaccounts/token,verbs=create func (r *OCIRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := ctrl.LoggerFrom(ctx) // Fetch the OCIRepository - obj := &ociv1.OCIRepository{} + obj := &sourcev1.OCIRepository{} if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } @@ -253,7 +256,7 @@ func (r *OCIRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques // reconcile iterates through the ociRepositoryReconcileFunc tasks for the // object. It returns early on the first call that returns // reconcile.ResultRequeue, or produces an error. -func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *ociv1.OCIRepository, reconcilers []ociRepositoryReconcileFunc) (sreconcile.Result, error) { +func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.OCIRepository, reconcilers []ociRepositoryReconcileFunc) (sreconcile.Result, error) { oldObj := obj.DeepCopy() rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress") @@ -298,7 +301,7 @@ func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seria var ( res sreconcile.Result resErr error - metadata = sourcev1.Artifact{} + metadata = meta.Artifact{} ) // Run the sub-reconcilers and build the result of reconciliation. @@ -325,10 +328,10 @@ func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seria } // reconcileSource fetches the upstream OCI artifact metadata and content. -// If this fails, it records v1beta2.FetchFailedCondition=True on the object and returns early. +// If this fails, it records v1.FetchFailedCondition=True on the object and returns early. 
func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, - obj *ociv1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) { - var auth authn.Authenticator + obj *sourcev1.OCIRepository, metadata *meta.Artifact, dir string) (sreconcile.Result, error) { + var authenticator authn.Authenticator ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() @@ -352,20 +355,55 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch return sreconcile.ResultEmpty, e } - proxyURL, err := r.getProxyURL(ctx, obj) - if err != nil { - e := serror.NewGeneric( - fmt.Errorf("failed to get proxy address: %w", err), - sourcev1.AuthenticationFailedReason, - ) - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) - return sreconcile.ResultEmpty, e + var proxyURL *url.URL + if obj.Spec.ProxySecretRef != nil { + var err error + proxyURL, err = secrets.ProxyURLFromSecretRef(ctx, r.Client, types.NamespacedName{ + Name: obj.Spec.ProxySecretRef.Name, + Namespace: obj.GetNamespace(), + }) + if err != nil { + e := serror.NewGeneric( + fmt.Errorf("failed to get proxy address: %w", err), + sourcev1.AuthenticationFailedReason, + ) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) + return sreconcile.ResultEmpty, e + } } - if _, ok := keychain.(soci.Anonymous); obj.Spec.Provider != ociv1.GenericOCIProvider && ok { + if _, ok := keychain.(soci.Anonymous); obj.Spec.Provider != "" && obj.Spec.Provider != sourcev1.GenericOCIProvider && ok { + opts := []auth.Option{ + auth.WithClient(r.Client), + auth.WithServiceAccountNamespace(obj.GetNamespace()), + } + + if obj.Spec.ServiceAccountName != "" { + // Check object-level workload identity feature gate. + if !auth.IsObjectLevelWorkloadIdentityEnabled() { + const gate = auth.FeatureGateObjectLevelWorkloadIdentity + const msgFmt = "to use spec.serviceAccountName for provider authentication please enable the %s feature gate in the controller" + err := fmt.Errorf(msgFmt, gate) + return sreconcile.ResultEmpty, serror.NewStalling(err, meta.FeatureGateDisabledReason) + } + // Set ServiceAccountName only if explicitly specified + opts = append(opts, auth.WithServiceAccountName(obj.Spec.ServiceAccountName)) + } + if r.TokenCache != nil { + involvedObject := cache.InvolvedObject{ + Kind: sourcev1.OCIRepositoryKind, + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + Operation: cache.OperationReconcile, + } + opts = append(opts, auth.WithCache(*r.TokenCache, involvedObject)) + } + if proxyURL != nil { + opts = append(opts, auth.WithProxyURL(*proxyURL)) + } var authErr error - auth, authErr = soci.OIDCAuth(ctxTimeout, obj.Spec.URL, obj.Spec.Provider, proxyURL) - if authErr != nil && !errors.Is(authErr, oci.ErrUnconfiguredProvider) { + authenticator, authErr = soci.OIDCAuth(ctxTimeout, obj.Spec.URL, obj.Spec.Provider, opts...) 
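The two genuinely new pieces of the authentication hunk above are the object-level workload identity gate and the per-object token cache. They are restated here with explanatory comments, using the identifiers of the surrounding reconcileSource function:

```go
// spec.serviceAccountName is only honoured for provider authentication when
// the object-level workload identity feature gate is enabled on the
// controller; otherwise the reconciler returns a stalling error.
if obj.Spec.ServiceAccountName != "" {
	if !auth.IsObjectLevelWorkloadIdentityEnabled() {
		err := fmt.Errorf("to use spec.serviceAccountName for provider authentication "+
			"please enable the %s feature gate in the controller",
			auth.FeatureGateObjectLevelWorkloadIdentity)
		return sreconcile.ResultEmpty, serror.NewStalling(err, meta.FeatureGateDisabledReason)
	}
	opts = append(opts, auth.WithServiceAccountName(obj.Spec.ServiceAccountName))
}

// Provider credentials are cached per involved object, so repeated reconciles
// of the same OCIRepository can reuse a token instead of re-authenticating.
if r.TokenCache != nil {
	opts = append(opts, auth.WithCache(*r.TokenCache, cache.InvolvedObject{
		Kind:      sourcev1.OCIRepositoryKind,
		Name:      obj.GetName(),
		Namespace: obj.GetNamespace(),
		Operation: cache.OperationReconcile,
	}))
}
```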
+ if authErr != nil { e := serror.NewGeneric( fmt.Errorf("failed to get credential from %s: %w", obj.Spec.Provider, authErr), sourcev1.AuthenticationFailedReason, @@ -386,7 +424,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch return sreconcile.ResultEmpty, e } - opts := makeRemoteOptions(ctx, transport, keychain, auth) + opts := makeRemoteOptions(ctx, transport, keychain, authenticator) // Determine which artifact revision to pull ref, err := r.getArtifactRef(obj, opts) @@ -412,12 +450,12 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to determine artifact digest: %w", err), - ociv1.OCIPullFailedReason, + sourcev1.OCIPullFailedReason, ) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - metaArtifact := &sourcev1.Artifact{Revision: revision} + metaArtifact := &meta.Artifact{Revision: revision} metaArtifact.DeepCopyInto(metadata) // Mark observations about the revision on the object @@ -446,7 +484,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch conditions.GetObservedGeneration(obj, sourcev1.SourceVerifiedCondition) != obj.Generation || conditions.IsFalse(obj, sourcev1.SourceVerifiedCondition) { - result, err := r.verifySignature(ctx, obj, ref, keychain, auth, transport, opts...) + result, err := r.verifySignature(ctx, obj, ref, keychain, authenticator, transport, opts...) if err != nil { provider := obj.Spec.Verify.Provider if obj.Spec.Verify.SecretRef == nil && obj.Spec.Verify.Provider == "cosign" { @@ -477,7 +515,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to pull artifact from '%s': %w", obj.Spec.URL, err), - ociv1.OCIPullFailedReason, + sourcev1.OCIPullFailedReason, ) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e @@ -488,7 +526,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to parse artifact manifest: %w", err), - ociv1.OCILayerOperationFailedReason, + sourcev1.OCILayerOperationFailedReason, ) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e @@ -498,29 +536,29 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch // Extract the compressed content from the selected layer blob, err := r.selectLayer(obj, img) if err != nil { - e := serror.NewGeneric(err, ociv1.OCILayerOperationFailedReason) + e := serror.NewGeneric(err, sourcev1.OCILayerOperationFailedReason) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } // Persist layer content to storage using the specified operation switch obj.GetLayerOperation() { - case ociv1.OCILayerExtract: + case sourcev1.OCILayerExtract: if err = tar.Untar(blob, dir, tar.WithMaxUntarSize(-1), tar.WithSkipSymlinks()); err != nil { e := serror.NewGeneric( fmt.Errorf("failed to extract layer contents from artifact: %w", err), - ociv1.OCILayerOperationFailedReason, + sourcev1.OCILayerOperationFailedReason, ) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } - case ociv1.OCILayerCopy: + case sourcev1.OCILayerCopy: metadata.Path = fmt.Sprintf("%s.tgz", 
r.digestFromRevision(metadata.Revision)) file, err := os.Create(filepath.Join(dir, metadata.Path)) if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to create file to copy layer to: %w", err), - ociv1.OCILayerOperationFailedReason, + sourcev1.OCILayerOperationFailedReason, ) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e @@ -531,7 +569,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch if err != nil { e := serror.NewGeneric( fmt.Errorf("failed to copy layer from artifact: %w", err), - ociv1.OCILayerOperationFailedReason, + sourcev1.OCILayerOperationFailedReason, ) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e @@ -539,7 +577,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch default: e := serror.NewGeneric( fmt.Errorf("unsupported layer operation: %s", obj.GetLayerOperation()), - ociv1.OCILayerOperationFailedReason, + sourcev1.OCILayerOperationFailedReason, ) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e @@ -551,7 +589,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch // selectLayer finds the matching layer and returns its compressed contents. // If no layer selector was provided, we pick the first layer from the OCI artifact. -func (r *OCIRepositoryReconciler) selectLayer(obj *ociv1.OCIRepository, image gcrv1.Image) (io.ReadCloser, error) { +func (r *OCIRepositoryReconciler) selectLayer(obj *sourcev1.OCIRepository, image gcrv1.Image) (io.ReadCloser, error) { layers, err := image.Layers() if err != nil { return nil, fmt.Errorf("failed to parse artifact layers: %w", err) @@ -632,7 +670,7 @@ func (r *OCIRepositoryReconciler) digestFromRevision(revision string) string { // If not, when using cosign it falls back to a keyless approach for verification. // When notation is used, a trust policy is required to verify the image. // The verification result is returned as a VerificationResult and any error encountered. -func (r *OCIRepositoryReconciler) verifySignature(ctx context.Context, obj *ociv1.OCIRepository, +func (r *OCIRepositoryReconciler) verifySignature(ctx context.Context, obj *sourcev1.OCIRepository, ref name.Reference, keychain authn.Keychain, auth authn.Authenticator, transport *http.Transport, opt ...remote.Option) (soci.VerificationResult, error) { @@ -800,12 +838,12 @@ func (r *OCIRepositoryReconciler) retrieveSecret(ctx context.Context, verifySecr } // parseRepository validates and extracts the repository URL. -func (r *OCIRepositoryReconciler) parseRepository(obj *ociv1.OCIRepository) (name.Repository, error) { - if !strings.HasPrefix(obj.Spec.URL, ociv1.OCIRepositoryPrefix) { +func (r *OCIRepositoryReconciler) parseRepository(obj *sourcev1.OCIRepository) (name.Repository, error) { + if !strings.HasPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix) { return name.Repository{}, fmt.Errorf("URL must be in format 'oci:////'") } - url := strings.TrimPrefix(obj.Spec.URL, ociv1.OCIRepositoryPrefix) + url := strings.TrimPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix) options := []name.Option{} if obj.Spec.Insecure { @@ -825,7 +863,7 @@ func (r *OCIRepositoryReconciler) parseRepository(obj *ociv1.OCIRepository) (nam } // getArtifactRef determines which tag or revision should be used and returns the OCI artifact FQN. 
-func (r *OCIRepositoryReconciler) getArtifactRef(obj *ociv1.OCIRepository, options []remote.Option) (name.Reference, error) { +func (r *OCIRepositoryReconciler) getArtifactRef(obj *sourcev1.OCIRepository, options []remote.Option) (name.Reference, error) { repo, err := r.parseRepository(obj) if err != nil { return nil, invalidOCIURLError{err} @@ -889,45 +927,37 @@ func (r *OCIRepositoryReconciler) getTagBySemver(repo name.Repository, exp strin // keychain generates the credential keychain based on the resource // configuration. If no auth is specified a default keychain with // anonymous access is returned -func (r *OCIRepositoryReconciler) keychain(ctx context.Context, obj *ociv1.OCIRepository) (authn.Keychain, error) { - pullSecretNames := sets.NewString() +func (r *OCIRepositoryReconciler) keychain(ctx context.Context, obj *sourcev1.OCIRepository) (authn.Keychain, error) { + var imagePullSecrets []corev1.Secret // lookup auth secret if obj.Spec.SecretRef != nil { - pullSecretNames.Insert(obj.Spec.SecretRef.Name) + var imagePullSecret corev1.Secret + secretRef := types.NamespacedName{Namespace: obj.Namespace, Name: obj.Spec.SecretRef.Name} + err := r.Get(ctx, secretRef, &imagePullSecret) + if err != nil { + r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.AuthenticationFailedReason, + "auth secret '%s' not found", obj.Spec.SecretRef.Name) + return nil, fmt.Errorf("failed to get secret '%s': %w", secretRef, err) + } + imagePullSecrets = append(imagePullSecrets, imagePullSecret) } // lookup service account if obj.Spec.ServiceAccountName != "" { - serviceAccountName := obj.Spec.ServiceAccountName - serviceAccount := corev1.ServiceAccount{} - err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: serviceAccountName}, &serviceAccount) + saRef := types.NamespacedName{Namespace: obj.Namespace, Name: obj.Spec.ServiceAccountName} + saSecrets, err := secrets.PullSecretsFromServiceAccountRef(ctx, r.Client, saRef) if err != nil { return nil, err } - for _, ips := range serviceAccount.ImagePullSecrets { - pullSecretNames.Insert(ips.Name) - } + imagePullSecrets = append(imagePullSecrets, saSecrets...) } // if no pullsecrets available return an AnonymousKeychain - if len(pullSecretNames) == 0 { + if len(imagePullSecrets) == 0 { return soci.Anonymous{}, nil } - // lookup image pull secrets - imagePullSecrets := make([]corev1.Secret, len(pullSecretNames)) - for i, imagePullSecretName := range pullSecretNames.List() { - imagePullSecret := corev1.Secret{} - err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: imagePullSecretName}, &imagePullSecret) - if err != nil { - r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.AuthenticationFailedReason, - "auth secret '%s' not found", imagePullSecretName) - return nil, err - } - imagePullSecrets[i] = imagePullSecret - } - return k8schain.NewFromPullSecrets(ctx, imagePullSecrets) } @@ -935,7 +965,7 @@ func (r *OCIRepositoryReconciler) keychain(ctx context.Context, obj *ociv1.OCIRe // the returned transport will include the TLS client and/or CA certificates. // If the insecure flag is set, the transport will skip the verification of the server's certificate. // Additionally, if a proxy is specified, transport will use it. 
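The keychain rework above folds the SecretRef secret and any service account image pull secrets into one slice, hands it to k8schain.NewFromPullSecrets, and falls back to an anonymous keychain when the slice is empty. A small consumer-side sketch of how such a keychain plugs into go-containerregistry; the registry reference is taken from the tests and the secret contents are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/google/go-containerregistry/pkg/authn/k8schain"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	ctx := context.Background()

	// A dockerconfigjson pull secret as it would be read from the cluster;
	// the credentials here are placeholders, not real values.
	pullSecret := corev1.Secret{
		Type: corev1.SecretTypeDockerConfigJson,
		Data: map[string][]byte{
			corev1.DockerConfigJsonKey: []byte(`{"auths":{"ghcr.io":{"auth":"dXNlcjpwYXNz"}}}`),
		},
	}

	// Build a keychain from the collected pull secrets, as the controller does.
	keychain, err := k8schain.NewFromPullSecrets(ctx, []corev1.Secret{pullSecret})
	if err != nil {
		panic(err)
	}

	// Use the keychain to resolve credentials when talking to the registry.
	ref, err := name.ParseReference("ghcr.io/stefanprodan/manifests/podinfo:6.1.6")
	if err != nil {
		panic(err)
	}
	desc, err := remote.Get(ref, remote.WithContext(ctx), remote.WithAuthFromKeychain(keychain))
	if err != nil {
		panic(err)
	}
	fmt.Println(desc.Digest)
}
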
-func (r *OCIRepositoryReconciler) transport(ctx context.Context, obj *ociv1.OCIRepository, proxyURL *url.URL) (*http.Transport, error) { +func (r *OCIRepositoryReconciler) transport(ctx context.Context, obj *sourcev1.OCIRepository, proxyURL *url.URL) (*http.Transport, error) { transport := remote.DefaultTransport.(*http.Transport).Clone() tlsConfig, err := r.getTLSConfig(ctx, obj) @@ -955,9 +985,14 @@ func (r *OCIRepositoryReconciler) transport(ctx context.Context, obj *ociv1.OCIR // getTLSConfig gets the TLS configuration for the transport based on the // specified secret reference in the OCIRepository object, or the insecure flag. -func (r *OCIRepositoryReconciler) getTLSConfig(ctx context.Context, obj *ociv1.OCIRepository) (*cryptotls.Config, error) { +func (r *OCIRepositoryReconciler) getTLSConfig(ctx context.Context, obj *sourcev1.OCIRepository) (*cryptotls.Config, error) { if obj.Spec.CertSecretRef == nil || obj.Spec.CertSecretRef.Name == "" { if obj.Spec.Insecure { + // NOTE: This is the only place in Flux where InsecureSkipVerify is allowed. + // This exception is made for OCIRepository to maintain backward compatibility + // with tools like crane that require insecure connections without certificates. + // This only applies when no CertSecretRef is provided AND insecure is explicitly set. + // All other controllers must NOT allow InsecureSkipVerify per our security policy. return &cryptotls.Config{ InsecureSkipVerify: true, }, nil @@ -965,65 +1000,15 @@ func (r *OCIRepositoryReconciler) getTLSConfig(ctx context.Context, obj *ociv1.O return nil, nil } - certSecretName := types.NamespacedName{ + secretName := types.NamespacedName{ Namespace: obj.Namespace, Name: obj.Spec.CertSecretRef.Name, } - var certSecret corev1.Secret - if err := r.Get(ctx, certSecretName, &certSecret); err != nil { - return nil, err - } - - tlsConfig, _, err := tls.KubeTLSClientConfigFromSecret(certSecret, "") - if err != nil { - return nil, err - } - if tlsConfig == nil { - tlsConfig, _, err = tls.TLSClientConfigFromSecret(certSecret, "") - if err != nil { - return nil, err - } - if tlsConfig != nil { - ctrl.LoggerFrom(ctx). - Info("warning: specifying TLS auth data via `certFile`/`keyFile`/`caFile` is deprecated, please use `tls.crt`/`tls.key`/`ca.crt` instead") - } - } - - return tlsConfig, nil -} - -// getProxyURL gets the proxy configuration for the transport based on the -// specified proxy secret reference in the OCIRepository object. 
-func (r *OCIRepositoryReconciler) getProxyURL(ctx context.Context, obj *ociv1.OCIRepository) (*url.URL, error) { - if obj.Spec.ProxySecretRef == nil || obj.Spec.ProxySecretRef.Name == "" { - return nil, nil - } - - proxySecretName := types.NamespacedName{ - Namespace: obj.Namespace, - Name: obj.Spec.ProxySecretRef.Name, - } - var proxySecret corev1.Secret - if err := r.Get(ctx, proxySecretName, &proxySecret); err != nil { - return nil, err - } - - proxyData := proxySecret.Data - address, ok := proxyData["address"] - if !ok { - return nil, fmt.Errorf("invalid proxy secret '%s/%s': key 'address' is missing", - obj.Namespace, obj.Spec.ProxySecretRef.Name) - } - proxyURL, err := url.Parse(string(address)) - if err != nil { - return nil, fmt.Errorf("failed to parse proxy address '%s': %w", address, err) - } - user, hasUser := proxyData["username"] - password, hasPassword := proxyData["password"] - if hasUser || hasPassword { - proxyURL.User = url.UserPassword(string(user), string(password)) - } - return proxyURL, nil + // NOTE: Use WithSystemCertPool to maintain backward compatibility with the existing + // extend approach (system CAs + user CA) rather than the default replace approach (user CA only). + // This ensures source-controller continues to work with both system and user-provided CA certificates. + var tlsOpts = []secrets.TLSConfigOption{secrets.WithSystemCertPool()} + return secrets.TLSConfigFromSecretRef(ctx, r.Client, secretName, tlsOpts...) } // reconcileStorage ensures the current state of the storage matches the @@ -1039,7 +1024,7 @@ func (r *OCIRepositoryReconciler) getProxyURL(ctx context.Context, obj *ociv1.OC // The hostname of any URL in the Status of the object are updated, to ensure // they match the Storage server hostname of current runtime. func (r *OCIRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, - obj *ociv1.OCIRepository, _ *sourcev1.Artifact, _ string) (sreconcile.Result, error) { + obj *sourcev1.OCIRepository, _ *meta.Artifact, _ string) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -1077,7 +1062,7 @@ func (r *OCIRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patc if artifactMissing { msg += ": disappeared from storage" } - rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, msg) + rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil { return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason) @@ -1096,13 +1081,13 @@ func (r *OCIRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patc // (Status) data on the object does not match the given. // // The inspection of the given data to the object is differed, ensuring any -// stale observations like v1beta2.ArtifactOutdatedCondition are removed. +// stale observations like v1.ArtifactOutdatedCondition are removed. // If the given Artifact does not differ from the object's current, it returns // early. // On a successful archive, the Artifact in the Status of the object is set, // and the symlink in the Storage is updated to its path. 
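The WithSystemCertPool NOTE above describes an extend behaviour: the CA from the cert secret is appended to the system roots rather than replacing them, and an optional client certificate pair is loaded. A stdlib-only sketch of that behaviour for the ca.crt/tls.crt/tls.key keys, with the exact semantics of secrets.TLSConfigFromSecretRef assumed rather than copied:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
)

// tlsConfigFromSecretData is a hypothetical stand-in for the helper used in
// the diff: it starts from the system certificate pool, appends the CA found
// under "ca.crt", and loads an optional client key pair from "tls.crt"/"tls.key".
func tlsConfigFromSecretData(data map[string][]byte) (*tls.Config, error) {
	pool, err := x509.SystemCertPool()
	if err != nil {
		return nil, fmt.Errorf("failed to load system cert pool: %w", err)
	}
	if ca, ok := data["ca.crt"]; ok {
		if !pool.AppendCertsFromPEM(ca) {
			return nil, fmt.Errorf("failed to parse CA certificate")
		}
	}
	cfg := &tls.Config{RootCAs: pool}
	crt, hasCrt := data["tls.crt"]
	key, hasKey := data["tls.key"]
	if hasCrt || hasKey {
		cert, err := tls.X509KeyPair(crt, key)
		if err != nil {
			return nil, fmt.Errorf("failed to parse TLS certificate and key: %w", err)
		}
		cfg.Certificates = []tls.Certificate{cert}
	}
	return cfg, nil
}

func main() {
	cfg, err := tlsConfigFromSecretData(map[string][]byte{})
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.InsecureSkipVerify) // false: server certificates are still verified here
}
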
func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, - obj *ociv1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) { + obj *sourcev1.OCIRepository, metadata *meta.Artifact, dir string) (sreconcile.Result, error) { // Create artifact artifact := r.Storage.NewArtifactFor(obj.Kind, obj, metadata.Revision, fmt.Sprintf("%s.tar.gz", r.digestFromRevision(metadata.Revision))) @@ -1159,7 +1144,7 @@ func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat defer unlock() switch obj.GetLayerOperation() { - case ociv1.OCILayerCopy: + case sourcev1.OCILayerCopy: if err = r.Storage.CopyFromPath(&artifact, filepath.Join(dir, metadata.Path)); err != nil { e := serror.NewGeneric( fmt.Errorf("unable to copy artifact to storage: %w", err), @@ -1182,7 +1167,7 @@ func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), ignoreDomain)...) } - if err := r.Storage.Archive(&artifact, dir, SourceIgnoreFilter(ps, ignoreDomain)); err != nil { + if err := r.Storage.Archive(&artifact, dir, storage.SourceIgnoreFilter(ps, ignoreDomain)); err != nil { e := serror.NewGeneric( fmt.Errorf("unable to archive artifact to storage: %s", err), sourcev1.ArchiveOperationFailedReason, @@ -1195,7 +1180,6 @@ func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat // Record the observations on the object. obj.Status.Artifact = artifact.DeepCopy() obj.Status.Artifact.Metadata = metadata.Metadata - obj.Status.ContentConfigChecksum = "" // To be removed in the next API version. obj.Status.ObservedIgnore = obj.Spec.Ignore obj.Status.ObservedLayerSelector = obj.Spec.LayerSelector @@ -1215,7 +1199,7 @@ func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat // reconcileDelete handles the deletion of the object. // It first garbage collects all Artifacts for the object from the Storage. // Removing the finalizer from the object if successful. -func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *ociv1.OCIRepository) (sreconcile.Result, error) { +func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.OCIRepository) (sreconcile.Result, error) { // Garbage collect the resource's artifacts if err := r.garbageCollect(ctx, obj); err != nil { // Return the error so we retry the failed garbage collection @@ -1225,6 +1209,10 @@ func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *ociv // Remove our finalizer from the list controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + // Cleanup caches. + r.TokenCache.DeleteEventsForObject(sourcev1.OCIRepositoryKind, + obj.GetName(), obj.GetNamespace(), cache.OperationReconcile) + // Stop reconciliation as the object is being deleted return sreconcile.ResultEmpty, nil } @@ -1234,7 +1222,7 @@ func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *ociv // It removes all but the current Artifact from the Storage, unless the // deletion timestamp on the object is set. Which will result in the // removal of all Artifacts for the objects. 
-func (r *OCIRepositoryReconciler) garbageCollect(ctx context.Context, obj *ociv1.OCIRepository) error { +func (r *OCIRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.OCIRepository) error { if !obj.DeletionTimestamp.IsZero() { if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { return serror.NewGeneric( @@ -1258,7 +1246,7 @@ func (r *OCIRepositoryReconciler) garbageCollect(ctx context.Context, obj *ociv1 } if len(delFiles) > 0 { r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", - fmt.Sprintf("garbage collected %d artifacts", len(delFiles))) + "garbage collected %d artifacts", len(delFiles)) return nil } } @@ -1282,7 +1270,7 @@ func (r *OCIRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Obj } // notify emits notification related to the reconciliation. -func (r *OCIRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *ociv1.OCIRepository, res sreconcile.Result, resErr error) { +func (r *OCIRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.OCIRepository, res sreconcile.Result, resErr error) { // Notify successful reconciliation for new artifact and recovery from any // failure. if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { @@ -1348,7 +1336,7 @@ type remoteOptions []remote.Option // ociContentConfigChanged evaluates the current spec with the observations // of the artifact in the status to determine if artifact content configuration // has changed and requires rebuilding the artifact. -func ociContentConfigChanged(obj *ociv1.OCIRepository) bool { +func ociContentConfigChanged(obj *sourcev1.OCIRepository) bool { if !ptr.Equal(obj.Spec.Ignore, obj.Status.ObservedIgnore) { return true } @@ -1363,7 +1351,7 @@ func ociContentConfigChanged(obj *ociv1.OCIRepository) bool { // Returns true if both arguments are nil or both arguments // dereference to the same value. // Based on k8s.io/utils/pointer/pointer.go pointer value equality. 
-func layerSelectorEqual(a, b *ociv1.OCILayerSelector) bool { +func layerSelectorEqual(a, b *sourcev1.OCILayerSelector) bool { if (a == nil) != (b == nil) { return false } diff --git a/internal/controller/ocirepository_controller_test.go b/internal/controller/ocirepository_controller_test.go index 794c458d9..6ea35e962 100644 --- a/internal/controller/ocirepository_controller_test.go +++ b/internal/controller/ocirepository_controller_test.go @@ -60,6 +60,9 @@ import ( kstatus "github.com/fluxcd/cli-utils/pkg/kstatus/status" "github.com/fluxcd/pkg/apis/meta" + intdigest "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" "github.com/fluxcd/pkg/git" "github.com/fluxcd/pkg/oci" "github.com/fluxcd/pkg/runtime/conditions" @@ -68,8 +71,6 @@ import ( "github.com/fluxcd/pkg/tar" sourcev1 "github.com/fluxcd/source-controller/api/v1" - ociv1 "github.com/fluxcd/source-controller/api/v1beta2" - intdigest "github.com/fluxcd/source-controller/internal/digest" serror "github.com/fluxcd/source-controller/internal/error" snotation "github.com/fluxcd/source-controller/internal/oci/notation" sreconcile "github.com/fluxcd/source-controller/internal/reconcile" @@ -88,10 +89,10 @@ func TestOCIRepositoryReconciler_deleteBeforeFinalizer(t *testing.T) { g.Expect(k8sClient.Delete(ctx, namespace)).NotTo(HaveOccurred()) }) - ocirepo := &ociv1.OCIRepository{} + ocirepo := &sourcev1.OCIRepository{} ocirepo.Name = "test-ocirepo" ocirepo.Namespace = namespaceName - ocirepo.Spec = ociv1.OCIRepositorySpec{ + ocirepo.Spec = sourcev1.OCIRepositorySpec{ Interval: metav1.Duration{Duration: interval}, URL: "oci://example.com", } @@ -143,7 +144,7 @@ func TestOCIRepository_Reconcile(t *testing.T) { tag: podinfoVersions["6.1.6"].tag, revision: fmt.Sprintf("%s@%s", podinfoVersions["6.1.6"].tag, podinfoVersions["6.1.6"].digest.String()), mediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - operation: ociv1.OCILayerCopy, + operation: sourcev1.OCILayerCopy, assertArtifact: []artifactFixture{ { expectedPath: "kustomize/deployment.yaml", @@ -181,15 +182,15 @@ func TestOCIRepository_Reconcile(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - origObj := &ociv1.OCIRepository{ + origObj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "ocirepository-reconcile", Namespace: ns.Name, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: tt.url, Interval: metav1.Duration{Duration: 60 * time.Minute}, - Reference: &ociv1.OCIRepositoryRef{}, + Reference: &sourcev1.OCIRepositoryRef{}, Insecure: true, }, } @@ -202,7 +203,7 @@ func TestOCIRepository_Reconcile(t *testing.T) { obj.Spec.Reference.SemVer = tt.semver } if tt.mediaType != "" { - obj.Spec.LayerSelector = &ociv1.OCILayerSelector{MediaType: tt.mediaType} + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{MediaType: tt.mediaType} if tt.operation != "" { obj.Spec.LayerSelector.Operation = tt.operation @@ -349,18 +350,18 @@ func TestOCIRepository_Reconcile_MediaType(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: 
"ocirepository-reconcile", Namespace: ns.Name, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: tt.url, Interval: metav1.Duration{Duration: 60 * time.Minute}, - Reference: &ociv1.OCIRepositoryRef{ + Reference: &sourcev1.OCIRepositoryRef{ Tag: tt.tag, }, - LayerSelector: &ociv1.OCILayerSelector{ + LayerSelector: &sourcev1.OCILayerSelector{ MediaType: tt.mediaType, }, Insecure: true, @@ -504,7 +505,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { crane.Insecure, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, ociv1.OCIPullFailedReason, "failed to determine artifact digest"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "failed to determine artifact digest"), }, }, { @@ -528,7 +529,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { includeSecret: true, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, ociv1.OCIPullFailedReason, "UNAUTHORIZED"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "UNAUTHORIZED"), }, }, { @@ -552,7 +553,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { includeSA: true, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, ociv1.OCIPullFailedReason, "UNAUTHORIZED"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "UNAUTHORIZED"), }, }, { @@ -576,8 +577,8 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { }, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), - *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), }, }, { @@ -601,8 +602,8 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { }, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), - *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), }, }, { @@ -619,7 +620,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { }), }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, ociv1.OCIPullFailedReason, "failed to determine artifact digest"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "%s", "failed to determine artifact digest"), }, }, { @@ -644,7 +645,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { }, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, ociv1.AuthenticationFailedReason, "cannot append certificate into certificate pool: invalid CA 
certificate"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "%s", "failed to parse CA certificate"), }, }, { @@ -669,8 +670,8 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { }, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), - *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), }, }, { @@ -682,7 +683,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { crane.Insecure, }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get credential from"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "%s", "failed to get credential from"), }, }, { @@ -706,8 +707,8 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { insecure: true, provider: "azure", assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), - *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), + *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), + *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "%s", "building artifact: new revision '' for ''"), }, }, } @@ -718,14 +719,14 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "auth-strategy-", Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ Interval: metav1.Duration{Duration: interval}, Timeout: &metav1.Duration{Duration: timeout}, }, @@ -741,7 +742,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { img, err := createPodinfoImageFromTar("podinfo-6.1.6.tar", "6.1.6", server.registryHost, tt.craneOpts...) 
g.Expect(err).ToNot(HaveOccurred()) obj.Spec.URL = img.url - obj.Spec.Reference = &ociv1.OCIRepositoryRef{ + obj.Spec.Reference = &sourcev1.OCIRepositoryRef{ Tag: img.tag, } @@ -821,7 +822,7 @@ func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) { sp := patch.NewSerialPatcher(obj, r.Client) tmpDir := t.TempDir() - got, err := r.reconcileSource(ctx, sp, obj, &sourcev1.Artifact{}, tmpDir) + got, err := r.reconcileSource(ctx, sp, obj, &meta.Artifact{}, tmpDir) if tt.wantErr { g.Expect(err).ToNot(BeNil()) } else { @@ -872,9 +873,9 @@ func TestOCIRepository_CertSecret(t *testing.T) { tlsSecretClientCert := corev1.Secret{ Data: map[string][]byte{ - oci.CACert: tlsCA, - oci.ClientCert: clientPublicKey, - oci.ClientKey: clientPrivateKey, + "caFile": tlsCA, + "certFile": clientPublicKey, + "keyFile": clientPrivateKey, }, } @@ -907,13 +908,13 @@ func TestOCIRepository_CertSecret(t *testing.T) { digest: pi.digest, certSecret: &corev1.Secret{ Data: map[string][]byte{ - oci.CACert: tlsCA, - oci.ClientCert: clientPublicKey, - oci.ClientKey: []byte("invalid-key"), + "caFile": tlsCA, + "certFile": clientPublicKey, + "keyFile": []byte("invalid-key"), }, }, expectreadyconition: false, - expectedstatusmessage: "failed to generate transport for '': tls: failed to find any PEM data in key input", + expectedstatusmessage: "failed to generate transport for '': failed to parse TLS certificate and key: tls: failed to find any PEM data in key input", }, } @@ -925,16 +926,16 @@ func TestOCIRepository_CertSecret(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "ocirepository-test-resource", Namespace: ns.Name, Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: tt.url, Interval: metav1.Duration{Duration: 60 * time.Minute}, - Reference: &ociv1.OCIRepositoryRef{Digest: tt.digest.String()}, + Reference: &sourcev1.OCIRepositoryRef{Digest: tt.digest.String()}, }, } @@ -954,7 +955,7 @@ func TestOCIRepository_CertSecret(t *testing.T) { key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - resultobj := ociv1.OCIRepository{} + resultobj := sourcev1.OCIRepository{} // Wait for the finalizer to be set g.Eventually(func() bool { @@ -1051,16 +1052,16 @@ func TestOCIRepository_ProxySecret(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "ocirepository-test-resource", Namespace: ns.Name, Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: tt.url, Interval: metav1.Duration{Duration: 60 * time.Minute}, - Reference: &ociv1.OCIRepositoryRef{Digest: tt.digest.String()}, + Reference: &sourcev1.OCIRepositoryRef{Digest: tt.digest.String()}, }, } @@ -1080,7 +1081,7 @@ func TestOCIRepository_ProxySecret(t *testing.T) { key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - resultobj := ociv1.OCIRepository{} + resultobj := sourcev1.OCIRepository{} // Wait for the finalizer to be set g.Eventually(func() bool { @@ -1138,7 +1139,7 @@ func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { tests := []struct { name string - reference *ociv1.OCIRepositoryRef + reference *sourcev1.OCIRepositoryRef want sreconcile.Result wantErr bool wantRevision string @@ -1155,7 
+1156,7 @@ func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { }, { name: "tag reference", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.6", }, want: sreconcile.ResultSuccess, @@ -1167,7 +1168,7 @@ func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { }, { name: "semver reference", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ SemVer: ">= 6.1.5", }, want: sreconcile.ResultSuccess, @@ -1179,7 +1180,7 @@ func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { }, { name: "digest reference", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Digest: img6.digest.String(), }, wantRevision: img6.digest.String(), @@ -1191,18 +1192,18 @@ func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { }, { name: "invalid tag reference", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.0", }, want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, ociv1.OCIPullFailedReason, " MANIFEST_UNKNOWN"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, " MANIFEST_UNKNOWN"), }, }, { name: "invalid semver reference", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ SemVer: "<= 6.1.0", }, want: sreconcile.ResultEmpty, @@ -1213,18 +1214,18 @@ func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { }, { name: "invalid digest reference", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Digest: "invalid", }, want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, ociv1.OCIPullFailedReason, "failed to determine artifact digest"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to determine artifact digest"), }, }, { name: "semver should take precedence over tag", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ SemVer: ">= 6.1.5", Tag: "6.1.5", }, @@ -1237,7 +1238,7 @@ func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { }, { name: "digest should take precedence over semver", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.6", SemVer: ">= 6.1.6", Digest: img5.digest.String(), @@ -1253,7 +1254,7 @@ func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). 
- WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -1264,12 +1265,12 @@ func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "checkout-strategy-", Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), Interval: metav1.Duration{Duration: interval}, Timeout: &metav1.Duration{Duration: timeout}, @@ -1288,7 +1289,7 @@ func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) { sp := patch.NewSerialPatcher(obj, r.Client) - artifact := &sourcev1.Artifact{} + artifact := &meta.Artifact{} tmpDir := t.TempDir() got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) if tt.wantErr { @@ -1309,7 +1310,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testi tests := []struct { name string - reference *ociv1.OCIRepositoryRef + reference *sourcev1.OCIRepositoryRef insecure bool want sreconcile.Result wantErr bool @@ -1318,12 +1319,12 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testi useDigest bool addMultipleCerts bool provideNoCert bool - beforeFunc func(obj *ociv1.OCIRepository, tag, revision string) + beforeFunc func(obj *sourcev1.OCIRepository, tag, revision string) assertConditions []metav1.Condition }{ { name: "signed image should pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.4", }, shouldSign: true, @@ -1336,7 +1337,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testi }, { name: "unsigned image should not pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.5", }, wantErr: true, @@ -1351,20 +1352,20 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testi }, { name: "verify failed before, removed from spec, remove condition", - reference: &ociv1.OCIRepositoryRef{Tag: "6.1.4"}, - beforeFunc: func(obj *ociv1.OCIRepository, tag, revision string) { + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") obj.Spec.Verify = nil - obj.Status.Artifact = &sourcev1.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} }, want: sreconcile.ResultSuccess, }, { name: "same artifact, verified before, change in obj gen verify again", - reference: &ociv1.OCIRepositoryRef{Tag: "6.1.4"}, + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, shouldSign: true, - beforeFunc: func(obj *ociv1.OCIRepository, tag, revision string) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} // Set Verified with old observed generation and different reason/message. conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified") // Set new object generation. 
@@ -1377,11 +1378,11 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testi }, { name: "no verify for already verified, verified condition remains the same", - reference: &ociv1.OCIRepositoryRef{Tag: "6.1.4"}, + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, shouldSign: true, - beforeFunc: func(obj *ociv1.OCIRepository, tag, revision string) { + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { // Artifact present and custom verified condition reason/message. - obj.Status.Artifact = &sourcev1.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified") }, want: sreconcile.ResultSuccess, @@ -1391,7 +1392,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testi }, { name: "signed image on an insecure registry passes verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.6", }, shouldSign: true, @@ -1405,7 +1406,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testi }, { name: "signed image on an insecure registry using digest as reference passes verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.6", }, shouldSign: true, @@ -1420,7 +1421,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testi }, { name: "verification level audit and correct trust identity should pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.6", }, shouldSign: true, @@ -1436,7 +1437,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testi }, { name: "no cert provided should not pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.5", }, wantErr: true, @@ -1455,7 +1456,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testi clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). 
- WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -1513,12 +1514,12 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testi server.Close() }) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "verify-oci-source-signature-", Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), Verify: &sourcev1.OCIRepositoryVerification{ Provider: "notation", @@ -1629,7 +1630,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureNotation(t *testi sp := patch.NewSerialPatcher(obj, r.Client) - artifact := &sourcev1.Artifact{} + artifact := &meta.Artifact{} got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) if tt.wantErr { tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", artifactRef.String()) @@ -1649,7 +1650,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *tes tests := []struct { name string - reference *ociv1.OCIRepositoryRef + reference *sourcev1.OCIRepositoryRef signatureVerification trustpolicy.SignatureVerification trustedIdentities []string trustStores []string @@ -1660,12 +1661,12 @@ func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *tes usePolicyJson bool provideNoPolicy bool policyJson string - beforeFunc func(obj *ociv1.OCIRepository, tag, revision string) + beforeFunc func(obj *sourcev1.OCIRepository, tag, revision string) assertConditions []metav1.Condition }{ { name: "verification level audit and incorrect trust identity should fail verification but not error", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.4", }, signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelAudit.Name}, @@ -1679,7 +1680,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *tes }, { name: "verification level permissive and incorrect trust identity should fail verification and error", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.4", }, signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelPermissive.Name}, @@ -1697,7 +1698,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *tes }, { name: "verification level permissive and correct trust identity should pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.4", }, signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelPermissive.Name}, @@ -1712,7 +1713,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *tes }, { name: "verification level audit and correct trust identity should pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.4", }, signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: trustpolicy.LevelAudit.Name}, @@ -1727,7 +1728,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *tes }, { name: "verification level skip and should not be marked as verified", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.4", }, signatureVerification: trustpolicy.SignatureVerification{VerificationLevel: 
trustpolicy.LevelSkip.Name}, @@ -1740,7 +1741,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *tes }, { name: "valid json but empty policy json should fail verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.4", }, usePolicyJson: true, @@ -1756,7 +1757,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *tes }, { name: "empty string should fail verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.4", }, usePolicyJson: true, @@ -1767,12 +1768,12 @@ func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *tes assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), - *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, fmt.Sprintf("error occurred while parsing %s: unexpected end of JSON input", snotation.DefaultTrustPolicyKey)), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "error occurred while parsing %s: unexpected end of JSON input", snotation.DefaultTrustPolicyKey), }, }, { name: "invalid character in string should fail verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.4", }, usePolicyJson: true, @@ -1783,12 +1784,12 @@ func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *tes assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), - *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, fmt.Sprintf("error occurred while parsing %s: invalid character '\\n' in string literal", snotation.DefaultTrustPolicyKey)), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "error occurred while parsing %s: invalid character '\\n' in string literal", snotation.DefaultTrustPolicyKey), }, }, { name: "empty string should fail verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.4", }, provideNoPolicy: true, @@ -1798,14 +1799,14 @@ func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *tes assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), *conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new revision '' for ''"), - *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, fmt.Sprintf("failed to verify the signature using provider 'notation': '%s' not found in secret '/notation", snotation.DefaultTrustPolicyKey)), + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, sourcev1.VerificationError, "failed to verify the signature using provider 'notation': '%s' not found in secret '/notation", snotation.DefaultTrustPolicyKey), }, }, } clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). 
- WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -1833,12 +1834,12 @@ func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *tes server.Close() }) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "verify-oci-source-signature-", Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), Verify: &sourcev1.OCIRepositoryVerification{ Provider: "notation", @@ -1968,7 +1969,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceTrustPolicyNotation(t *tes sp := patch.NewSerialPatcher(obj, r.Client) - artifact := &sourcev1.Artifact{} + artifact := &meta.Artifact{} got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) g.Expect(r.Delete(ctx, secret)).NotTo(HaveOccurred()) if tt.wantErr { @@ -1992,19 +1993,19 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureCosign(t *testing tests := []struct { name string - reference *ociv1.OCIRepositoryRef + reference *sourcev1.OCIRepositoryRef insecure bool want sreconcile.Result wantErr bool wantErrMsg string shouldSign bool keyless bool - beforeFunc func(obj *ociv1.OCIRepository, tag, revision string) + beforeFunc func(obj *sourcev1.OCIRepository, tag, revision string) assertConditions []metav1.Condition }{ { name: "signed image should pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.4", }, shouldSign: true, @@ -2017,7 +2018,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureCosign(t *testing }, { name: "unsigned image should not pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.5", }, wantErr: true, @@ -2031,7 +2032,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureCosign(t *testing }, { name: "unsigned image should not pass keyless verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.5", }, wantErr: true, @@ -2045,20 +2046,20 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureCosign(t *testing }, { name: "verify failed before, removed from spec, remove condition", - reference: &ociv1.OCIRepositoryRef{Tag: "6.1.4"}, - beforeFunc: func(obj *ociv1.OCIRepository, tag, revision string) { + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, "VerifyFailed", "fail msg") obj.Spec.Verify = nil - obj.Status.Artifact = &sourcev1.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} }, want: sreconcile.ResultSuccess, }, { name: "same artifact, verified before, change in obj gen verify again", - reference: &ociv1.OCIRepositoryRef{Tag: "6.1.4"}, + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, shouldSign: true, - beforeFunc: func(obj *ociv1.OCIRepository, tag, revision string) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} // Set Verified with old observed generation and different reason/message. 
conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified") // Set new object generation. @@ -2071,11 +2072,11 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureCosign(t *testing }, { name: "no verify for already verified, verified condition remains the same", - reference: &ociv1.OCIRepositoryRef{Tag: "6.1.4"}, + reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.4"}, shouldSign: true, - beforeFunc: func(obj *ociv1.OCIRepository, tag, revision string) { + beforeFunc: func(obj *sourcev1.OCIRepository, tag, revision string) { // Artifact present and custom verified condition reason/message. - obj.Status.Artifact = &sourcev1.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} + obj.Status.Artifact = &meta.Artifact{Revision: fmt.Sprintf("%s@%s", tag, revision)} conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Verified", "verified") }, want: sreconcile.ResultSuccess, @@ -2085,7 +2086,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureCosign(t *testing }, { name: "signed image on an insecure registry passes verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.6", }, shouldSign: true, @@ -2101,7 +2102,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureCosign(t *testing clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -2157,12 +2158,12 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureCosign(t *testing server.Close() }) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "verify-oci-source-signature-", Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), Verify: &sourcev1.OCIRepositoryVerification{ Provider: "cosign", @@ -2240,7 +2241,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureCosign(t *testing sp := patch.NewSerialPatcher(obj, r.Client) - artifact := &sourcev1.Artifact{} + artifact := &meta.Artifact{} got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) if tt.wantErr { tt.wantErrMsg = strings.ReplaceAll(tt.wantErrMsg, "", artifactRef.String()) @@ -2258,17 +2259,17 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignatureCosign(t *testing func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testing.T) { tests := []struct { name string - reference *ociv1.OCIRepositoryRef + reference *sourcev1.OCIRepositoryRef want sreconcile.Result wantErr bool wantErrMsg string - beforeFunc func(obj *ociv1.OCIRepository) + beforeFunc func(obj *sourcev1.OCIRepository) assertConditions []metav1.Condition revision string }{ { name: "signed image with no identity matching specified should pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.5.1", }, want: sreconcile.ResultSuccess, @@ -2281,11 +2282,11 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi }, { name: "signed image with correct subject and issuer should pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.5.1", }, want: sreconcile.ResultSuccess, - beforeFunc: func(obj *ociv1.OCIRepository) { + beforeFunc: func(obj *sourcev1.OCIRepository) { 
obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ { @@ -2303,11 +2304,11 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi }, { name: "signed image with both incorrect and correct identity matchers should pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.5.1", }, want: sreconcile.ResultSuccess, - beforeFunc: func(obj *ociv1.OCIRepository) { + beforeFunc: func(obj *sourcev1.OCIRepository) { obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ { Subject: "intruder", @@ -2329,12 +2330,12 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi }, { name: "signed image with incorrect subject and issuer should not pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.5.1", }, wantErr: true, want: sreconcile.ResultEmpty, - beforeFunc: func(obj *ociv1.OCIRepository) { + beforeFunc: func(obj *sourcev1.OCIRepository) { obj.Spec.Verify.MatchOIDCIdentity = []sourcev1.OIDCIdentityMatch{ { Subject: "intruder", @@ -2351,7 +2352,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi }, { name: "unsigned image should not pass verification", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.0", }, wantErr: true, @@ -2367,7 +2368,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -2380,12 +2381,12 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "verify-oci-source-signature-", Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: "oci://ghcr.io/stefanprodan/manifests/podinfo", Verify: &sourcev1.OCIRepositoryVerification{ Provider: "cosign", @@ -2415,7 +2416,7 @@ func TestOCIRepository_reconcileSource_verifyOCISourceSignature_keyless(t *testi sp := patch.NewSerialPatcher(obj, r.Client) - artifact := &sourcev1.Artifact{} + artifact := &meta.Artifact{} got, err := r.reconcileSource(ctx, sp, obj, artifact, t.TempDir()) if tt.wantErr { g.Expect(err).To(HaveOccurred()) @@ -2450,101 +2451,101 @@ func TestOCIRepository_reconcileSource_noop(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *ociv1.OCIRepository) - afterFunc func(g *WithT, artifact *sourcev1.Artifact) + beforeFunc func(obj *sourcev1.OCIRepository) + afterFunc func(g *WithT, artifact *meta.Artifact) }{ { name: "full reconcile - no existing artifact", - afterFunc: func(g *WithT, artifact *sourcev1.Artifact) { + afterFunc: func(g *WithT, artifact *meta.Artifact) { g.Expect(artifact.Metadata).ToNot(BeEmpty()) }, }, { name: "noop - artifact revisions match", - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{ Revision: testRevision, } }, - afterFunc: func(g *WithT, artifact *sourcev1.Artifact) { + afterFunc: func(g *WithT, artifact *meta.Artifact) { g.Expect(artifact.Metadata).To(BeEmpty()) }, }, { name: "full reconcile - same rev, 
unobserved ignore", - beforeFunc: func(obj *ociv1.OCIRepository) { + beforeFunc: func(obj *sourcev1.OCIRepository) { obj.Status.ObservedIgnore = ptr.To("aaa") - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: testRevision, } }, - afterFunc: func(g *WithT, artifact *sourcev1.Artifact) { + afterFunc: func(g *WithT, artifact *meta.Artifact) { g.Expect(artifact.Metadata).ToNot(BeEmpty()) }, }, { name: "noop - same rev, observed ignore", - beforeFunc: func(obj *ociv1.OCIRepository) { + beforeFunc: func(obj *sourcev1.OCIRepository) { obj.Spec.Ignore = ptr.To("aaa") obj.Status.ObservedIgnore = ptr.To("aaa") - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: testRevision, } }, - afterFunc: func(g *WithT, artifact *sourcev1.Artifact) { + afterFunc: func(g *WithT, artifact *meta.Artifact) { g.Expect(artifact.Metadata).To(BeEmpty()) }, }, { name: "full reconcile - same rev, unobserved layer selector", - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Spec.LayerSelector = &ociv1.OCILayerSelector{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Operation: ociv1.OCILayerCopy, + Operation: sourcev1.OCILayerCopy, } - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: testRevision, } }, - afterFunc: func(g *WithT, artifact *sourcev1.Artifact) { + afterFunc: func(g *WithT, artifact *meta.Artifact) { g.Expect(artifact.Metadata).ToNot(BeEmpty()) }, }, { name: "noop - same rev, observed layer selector", - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Spec.LayerSelector = &ociv1.OCILayerSelector{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Operation: ociv1.OCILayerCopy, + Operation: sourcev1.OCILayerCopy, } - obj.Status.ObservedLayerSelector = &ociv1.OCILayerSelector{ + obj.Status.ObservedLayerSelector = &sourcev1.OCILayerSelector{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Operation: ociv1.OCILayerCopy, + Operation: sourcev1.OCILayerCopy, } - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: testRevision, } }, - afterFunc: func(g *WithT, artifact *sourcev1.Artifact) { + afterFunc: func(g *WithT, artifact *meta.Artifact) { g.Expect(artifact.Metadata).To(BeEmpty()) }, }, { name: "full reconcile - same rev, observed layer selector changed", - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Spec.LayerSelector = &ociv1.OCILayerSelector{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, } - obj.Status.ObservedLayerSelector = &ociv1.OCILayerSelector{ + obj.Status.ObservedLayerSelector = &sourcev1.OCILayerSelector{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Operation: ociv1.OCILayerCopy, + Operation: sourcev1.OCILayerCopy, } - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: testRevision, } }, - afterFunc: func(g *WithT, artifact *sourcev1.Artifact) { + afterFunc: func(g *WithT, artifact *meta.Artifact) { g.Expect(artifact.Metadata).ToNot(BeEmpty()) }, }, @@ -2552,7 +2553,7 @@ func 
TestOCIRepository_reconcileSource_noop(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -2565,14 +2566,14 @@ func TestOCIRepository_reconcileSource_noop(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "noop-", Generation: 1, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost), - Reference: &ociv1.OCIRepositoryRef{Tag: "6.1.5"}, + Reference: &sourcev1.OCIRepositoryRef{Tag: "6.1.5"}, Interval: metav1.Duration{Duration: interval}, Timeout: &metav1.Duration{Duration: timeout}, Insecure: true, @@ -2590,7 +2591,7 @@ func TestOCIRepository_reconcileSource_noop(t *testing.T) { sp := patch.NewSerialPatcher(obj, r.Client) - artifact := &sourcev1.Artifact{} + artifact := &meta.Artifact{} tmpDir := t.TempDir() got, err := r.reconcileSource(ctx, sp, obj, artifact, tmpDir) g.Expect(err).ToNot(HaveOccurred()) @@ -2607,29 +2608,29 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { tests := []struct { name string targetPath string - artifact *sourcev1.Artifact - beforeFunc func(obj *ociv1.OCIRepository) + artifact *meta.Artifact + beforeFunc func(obj *sourcev1.OCIRepository) want sreconcile.Result wantErr bool - assertArtifact *sourcev1.Artifact + assertArtifact *meta.Artifact assertPaths []string assertConditions []metav1.Condition - afterFunc func(g *WithT, obj *ociv1.OCIRepository) + afterFunc func(g *WithT, obj *sourcev1.OCIRepository) }{ { name: "Archiving Artifact creates correct files and condition", targetPath: "testdata/oci/repository", - artifact: &sourcev1.Artifact{ + artifact: &meta.Artifact{ Revision: "revision", }, - beforeFunc: func(obj *ociv1.OCIRepository) { + beforeFunc: func(obj *sourcev1.OCIRepository) { conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new revision") }, want: sreconcile.ResultSuccess, assertPaths: []string{ "latest.tar.gz", }, - afterFunc: func(g *WithT, obj *ociv1.OCIRepository) { + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { g.Expect(obj.Status.Artifact.Digest).To(Equal("sha256:6a5bd135a816ec0ad246c41cfdd87629e40ef6520001aeb2d0118a703abe9e7a")) }, assertConditions: []metav1.Condition{ @@ -2639,15 +2640,15 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { { name: "Artifact with source ignore", targetPath: "testdata/oci/repository", - artifact: &sourcev1.Artifact{Revision: "revision"}, - beforeFunc: func(obj *ociv1.OCIRepository) { + artifact: &meta.Artifact{Revision: "revision"}, + beforeFunc: func(obj *sourcev1.OCIRepository) { obj.Spec.Ignore = ptr.To("foo.txt") }, want: sreconcile.ResultSuccess, assertPaths: []string{ "latest.tar.gz", }, - afterFunc: func(g *WithT, obj *ociv1.OCIRepository) { + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { g.Expect(obj.Status.Artifact.Digest).To(Equal("sha256:9102e9c8626e48821a91a4963436f1673cd85f8fb3deb843c992f85b995c38ea")) }, assertConditions: []metav1.Condition{ @@ -2656,17 +2657,17 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { }, { name: "No status changes if artifact is already present", - artifact: &sourcev1.Artifact{ + artifact: &meta.Artifact{ Revision: "revision", }, targetPath: "testdata/oci/repository", want: 
sreconcile.ResultSuccess, - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{ Revision: "revision", } }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Revision: "revision", }, assertConditions: []metav1.Condition{ @@ -2676,18 +2677,18 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { { name: "Artifact already present, unobserved ignore, rebuild artifact", targetPath: "testdata/oci/repository", - artifact: &sourcev1.Artifact{ + artifact: &meta.Artifact{ Revision: "revision", }, - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "revision"} + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "revision"} obj.Spec.Ignore = ptr.To("aaa") }, want: sreconcile.ResultSuccess, assertPaths: []string{ "latest.tar.gz", }, - afterFunc: func(g *WithT, obj *ociv1.OCIRepository) { + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { g.Expect(*obj.Status.ObservedIgnore).To(Equal("aaa")) }, assertConditions: []metav1.Condition{ @@ -2697,18 +2698,18 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { { name: "Artifact already present, unobserved layer selector, rebuild artifact", targetPath: "testdata/oci/repository", - artifact: &sourcev1.Artifact{ + artifact: &meta.Artifact{ Revision: "revision", }, - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Spec.LayerSelector = &ociv1.OCILayerSelector{MediaType: "foo"} - obj.Status.Artifact = &sourcev1.Artifact{Revision: "revision"} + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{MediaType: "foo"} + obj.Status.Artifact = &meta.Artifact{Revision: "revision"} }, want: sreconcile.ResultSuccess, assertPaths: []string{ "latest.tar.gz", }, - afterFunc: func(g *WithT, obj *ociv1.OCIRepository) { + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { g.Expect(obj.Status.ObservedLayerSelector.MediaType).To(Equal("foo")) }, assertConditions: []metav1.Condition{ @@ -2718,24 +2719,24 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { { name: "Artifact already present, observed layer selector changed, rebuild artifact", targetPath: "testdata/oci/repository", - artifact: &sourcev1.Artifact{ + artifact: &meta.Artifact{ Revision: "revision", Path: "foo.txt", }, - beforeFunc: func(obj *ociv1.OCIRepository) { - obj.Spec.LayerSelector = &ociv1.OCILayerSelector{ + beforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerCopy, + Operation: sourcev1.OCILayerCopy, } - obj.Status.Artifact = &sourcev1.Artifact{Revision: "revision"} + obj.Status.Artifact = &meta.Artifact{Revision: "revision"} }, want: sreconcile.ResultSuccess, assertPaths: []string{ "latest.tar.gz", }, - afterFunc: func(g *WithT, obj *ociv1.OCIRepository) { + afterFunc: func(g *WithT, obj *sourcev1.OCIRepository) { g.Expect(obj.Status.ObservedLayerSelector.MediaType).To(Equal("foo")) - g.Expect(obj.Status.ObservedLayerSelector.Operation).To(Equal(ociv1.OCILayerCopy)) + g.Expect(obj.Status.ObservedLayerSelector.Operation).To(Equal(sourcev1.OCILayerCopy)) }, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"), @@ -2744,18 +2745,18 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { { name: 
"Artifact already present, observed ignore and layer selector, up-to-date", targetPath: "testdata/oci/repository", - artifact: &sourcev1.Artifact{ + artifact: &meta.Artifact{ Revision: "revision", }, - beforeFunc: func(obj *ociv1.OCIRepository) { + beforeFunc: func(obj *sourcev1.OCIRepository) { obj.Spec.Ignore = ptr.To("aaa") - obj.Spec.LayerSelector = &ociv1.OCILayerSelector{MediaType: "foo"} - obj.Status.Artifact = &sourcev1.Artifact{Revision: "revision"} + obj.Spec.LayerSelector = &sourcev1.OCILayerSelector{MediaType: "foo"} + obj.Status.Artifact = &meta.Artifact{Revision: "revision"} obj.Status.ObservedIgnore = ptr.To("aaa") - obj.Status.ObservedLayerSelector = &ociv1.OCILayerSelector{MediaType: "foo"} + obj.Status.ObservedLayerSelector = &sourcev1.OCILayerSelector{MediaType: "foo"} }, want: sreconcile.ResultSuccess, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Revision: "revision", }, assertConditions: []metav1.Condition{ @@ -2784,7 +2785,7 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -2799,7 +2800,7 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { _ = resetChmod(tt.targetPath, 0o755, 0o644) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "reconcile-artifact-", Generation: 1, @@ -2809,7 +2810,7 @@ func TestOCIRepository_reconcileArtifact(t *testing.T) { tt.beforeFunc(obj) } - artifact := &sourcev1.Artifact{} + artifact := &meta.Artifact{} if tt.artifact != nil { artifact = tt.artifact } @@ -2872,7 +2873,7 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { tests := []struct { name string url string - reference *ociv1.OCIRepositoryRef + reference *sourcev1.OCIRepositoryRef wantErr bool want string }{ @@ -2884,7 +2885,7 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { { name: "valid url with tag reference", url: "oci://ghcr.io/stefanprodan/charts", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Tag: "6.1.6", }, want: "ghcr.io/stefanprodan/charts:6.1.6", @@ -2892,7 +2893,7 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { { name: "valid url with digest reference", url: "oci://ghcr.io/stefanprodan/charts", - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ Digest: imgs["6.1.6"].digest.String(), }, want: "ghcr.io/stefanprodan/charts@" + imgs["6.1.6"].digest.String(), @@ -2900,7 +2901,7 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { { name: "valid url with semver reference", url: fmt.Sprintf("oci://%s/podinfo", server.registryHost), - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ SemVer: ">= 6.1.6", }, want: server.registryHost + "/podinfo:6.1.6", @@ -2913,7 +2914,7 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { { name: "valid url with semver filter", url: fmt.Sprintf("oci://%s/podinfo", server.registryHost), - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ SemVer: ">= 6.1.x-0", SemverFilter: ".*-rc.*", }, @@ -2922,7 +2923,7 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { { name: "valid url with semver filter and unexisting version", url: fmt.Sprintf("oci://%s/podinfo", server.registryHost), - reference: &ociv1.OCIRepositoryRef{ + reference: &sourcev1.OCIRepositoryRef{ 
SemVer: ">= 6.1.x-0", SemverFilter: ".*-alpha.*", }, @@ -2932,7 +2933,7 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -2943,11 +2944,11 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "artifact-url-", }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: tt.url, Interval: metav1.Duration{Duration: interval}, Timeout: &metav1.Duration{Duration: timeout}, @@ -2971,19 +2972,19 @@ func TestOCIRepository_getArtifactRef(t *testing.T) { } } -func TestOCIRepository_stalled(t *testing.T) { +func TestOCIRepository_invalidURL(t *testing.T) { g := NewWithT(t) - ns, err := testEnv.CreateNamespace(ctx, "ocirepository-stalled-test") + ns, err := testEnv.CreateNamespace(ctx, "ocirepository-invalid-url-test") g.Expect(err).ToNot(HaveOccurred()) defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "ocirepository-reconcile", Namespace: ns.Name, }, - Spec: ociv1.OCIRepositorySpec{ + Spec: sourcev1.OCIRepositorySpec{ URL: "oci://ghcr.io/test/test:v1", Interval: metav1.Duration{Duration: 60 * time.Minute}, }, @@ -2992,7 +2993,7 @@ func TestOCIRepository_stalled(t *testing.T) { g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - resultobj := ociv1.OCIRepository{} + resultobj := sourcev1.OCIRepository{} // Wait for the object to fail g.Eventually(func() bool { @@ -3013,24 +3014,92 @@ func TestOCIRepository_stalled(t *testing.T) { g.Expect(stalledCondition.Reason).Should(Equal(sourcev1.URLInvalidReason)) } +func TestOCIRepository_objectLevelWorkloadIdentityFeatureGate(t *testing.T) { + g := NewWithT(t) + + ns, err := testEnv.CreateNamespace(ctx, "ocirepository-olwifg-test") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() + + err = testEnv.Create(ctx, &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: "test", + }, + }) + g.Expect(err).NotTo(HaveOccurred()) + + obj := &sourcev1.OCIRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "ocirepository-reconcile", + Namespace: ns.Name, + }, + Spec: sourcev1.OCIRepositorySpec{ + URL: "oci://ghcr.io/stefanprodan/manifests/podinfo", + Interval: metav1.Duration{Duration: 60 * time.Minute}, + Provider: "aws", + ServiceAccountName: "test", + }, + } + + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + resultobj := &sourcev1.OCIRepository{} + + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, resultobj); err != nil { + return false + } + return conditions.IsStalled(resultobj) + }).Should(BeTrue()) + + stalledCondition := conditions.Get(resultobj, meta.StalledCondition) + g.Expect(stalledCondition).ToNot(BeNil()) + g.Expect(stalledCondition.Reason).Should(Equal(meta.FeatureGateDisabledReason)) + g.Expect(stalledCondition.Message).Should(Equal("to use spec.serviceAccountName for provider authentication please enable the ObjectLevelWorkloadIdentity feature gate 
in the controller")) + + auth.EnableObjectLevelWorkloadIdentity() + t.Cleanup(auth.DisableObjectLevelWorkloadIdentity) + + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, resultobj); err != nil { + return false + } + resultobj.Annotations = map[string]string{ + meta.ReconcileRequestAnnotation: time.Now().Format(time.RFC3339), + } + return testEnv.Update(ctx, resultobj) == nil + }).Should(BeTrue()) + + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, resultobj); err != nil { + return false + } + logOCIRepoStatus(t, resultobj) + return !conditions.IsReady(resultobj) && + conditions.GetReason(resultobj, meta.ReadyCondition) == sourcev1.AuthenticationFailedReason + }).Should(BeTrue()) +} + func TestOCIRepository_reconcileStorage(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *ociv1.OCIRepository, storage *Storage) error + beforeFunc func(obj *sourcev1.OCIRepository, storage *storage.Storage) error want sreconcile.Result wantErr bool assertConditions []metav1.Condition - assertArtifact *sourcev1.Artifact + assertArtifact *meta.Artifact assertPaths []string }{ { name: "garbage collects", - beforeFunc: func(obj *ociv1.OCIRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error { revisions := []string{"a", "b", "c", "d"} for n := range revisions { v := revisions[n] - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/oci-reconcile-storage/%s.txt", v), Revision: v, } @@ -3051,7 +3120,7 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { conditions.MarkTrue(obj, meta.ReadyCondition, "foo", "bar") return nil }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/oci-reconcile-storage/d.txt", Revision: "d", Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4", @@ -3079,8 +3148,8 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { }, { name: "notices missing artifact in storage", - beforeFunc: func(obj *ociv1.OCIRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/oci-reconcile-storage/invalid.txt", Revision: "e", } @@ -3098,10 +3167,10 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { }, { name: "notices empty artifact digest", - beforeFunc: func(obj *ociv1.OCIRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error { f := "empty-digest.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/oci-reconcile-storage/%s.txt", f), Revision: "fake", } @@ -3129,10 +3198,10 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { }, { name: "notices artifact digest mismatch", - beforeFunc: func(obj *ociv1.OCIRepository, storage *Storage) error { + beforeFunc: func(obj *sourcev1.OCIRepository, storage *storage.Storage) error { f := "digest-mismatch.txt" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Path: fmt.Sprintf("/oci-reconcile-storage/%s.txt", f), Revision: "fake", } @@ -3160,8 +3229,8 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { }, { name: "updates hostname on diff from current", - beforeFunc: func(obj *ociv1.OCIRepository, storage *Storage) error { - obj.Status.Artifact = &sourcev1.Artifact{ + beforeFunc: func(obj *sourcev1.OCIRepository, storage 
*storage.Storage) error { + obj.Status.Artifact = &meta.Artifact{ Path: "/oci-reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -3180,7 +3249,7 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { assertPaths: []string{ "/oci-reconcile-storage/hostname.txt", }, - assertArtifact: &sourcev1.Artifact{ + assertArtifact: &meta.Artifact{ Path: "/oci-reconcile-storage/hostname.txt", Revision: "f", Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", @@ -3195,7 +3264,7 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&ociv1.OCIRepository{}) + WithStatusSubresource(&sourcev1.OCIRepository{}) r := &OCIRepositoryReconciler{ Client: clientBuilder.Build(), @@ -3208,7 +3277,7 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-", Generation: 1, @@ -3226,7 +3295,7 @@ func TestOCIRepository_reconcileStorage(t *testing.T) { sp := patch.NewSerialPatcher(obj, r.Client) - got, err := r.reconcileStorage(ctx, sp, obj, &sourcev1.Artifact{}, "") + got, err := r.reconcileStorage(ctx, sp, obj, &meta.Artifact{}, "") if tt.wantErr { g.Expect(err).To(HaveOccurred()) } else { @@ -3267,7 +3336,7 @@ func TestOCIRepository_ReconcileDelete(t *testing.T) { patchOptions: getPatchOptions(ociRepositoryReadyCondition.Owned, "sc"), } - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ ObjectMeta: metav1.ObjectMeta{ Name: "reconcile-delete-", DeletionTimestamp: &metav1.Time{Time: time.Now()}, @@ -3275,10 +3344,10 @@ func TestOCIRepository_ReconcileDelete(t *testing.T) { sourcev1.SourceFinalizer, }, }, - Status: ociv1.OCIRepositoryStatus{}, + Status: sourcev1.OCIRepositoryStatus{}, } - artifact := testStorage.NewArtifactFor(ociv1.OCIRepositoryKind, obj.GetObjectMeta(), "revision", "foo.txt") + artifact := testStorage.NewArtifactFor(sourcev1.OCIRepositoryKind, obj.GetObjectMeta(), "revision", "foo.txt") obj.Status.Artifact = &artifact got, err := r.reconcileDelete(ctx, obj) @@ -3297,8 +3366,8 @@ func TestOCIRepositoryReconciler_notify(t *testing.T) { name string res sreconcile.Result resErr error - oldObjBeforeFunc func(obj *ociv1.OCIRepository) - newObjBeforeFunc func(obj *ociv1.OCIRepository) + oldObjBeforeFunc func(obj *sourcev1.OCIRepository) + newObjBeforeFunc func(obj *sourcev1.OCIRepository) commit git.Commit wantEvent string }{ @@ -3311,9 +3380,9 @@ func TestOCIRepositoryReconciler_notify(t *testing.T) { name: "new artifact", res: sreconcile.ResultSuccess, resErr: nil, - newObjBeforeFunc: func(obj *ociv1.OCIRepository) { + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { obj.Spec.URL = "oci://newurl.io" - obj.Status.Artifact = &sourcev1.Artifact{ + obj.Status.Artifact = &meta.Artifact{ Revision: "xxx", Digest: "yyy", Metadata: map[string]string{ @@ -3328,14 +3397,14 @@ func TestOCIRepositoryReconciler_notify(t *testing.T) { name: "recovery from failure", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, 
sourcev1.ReadOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, - newObjBeforeFunc: func(obj *ociv1.OCIRepository) { + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { obj.Spec.URL = "oci://newurl.io" - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal Succeeded stored artifact with revision 'xxx' from 'oci://newurl.io'", @@ -3344,14 +3413,14 @@ func TestOCIRepositoryReconciler_notify(t *testing.T) { name: "recovery and new artifact", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, - newObjBeforeFunc: func(obj *ociv1.OCIRepository) { + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { obj.Spec.URL = "oci://newurl.io" - obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Digest: "bbb"} + obj.Status.Artifact = &meta.Artifact{Revision: "aaa", Digest: "bbb"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, wantEvent: "Normal NewArtifact stored artifact with revision 'aaa' from 'oci://newurl.io'", @@ -3360,12 +3429,12 @@ func TestOCIRepositoryReconciler_notify(t *testing.T) { name: "no updates", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, - newObjBeforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + newObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, }, @@ -3373,8 +3442,8 @@ func TestOCIRepositoryReconciler_notify(t *testing.T) { name: "no updates on requeue", res: sreconcile.ResultRequeue, resErr: nil, - oldObjBeforeFunc: func(obj *ociv1.OCIRepository) { - obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} + oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) { + obj.Status.Artifact = &meta.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "ready") }, }, @@ -3385,7 +3454,7 @@ func TestOCIRepositoryReconciler_notify(t *testing.T) { g := NewWithT(t) recorder := record.NewFakeRecorder(32) - oldObj := &ociv1.OCIRepository{} + oldObj := &sourcev1.OCIRepository{} newObj := oldObj.DeepCopy() if tt.oldObjBeforeFunc != nil { @@ -3513,112 +3582,112 @@ func setPodinfoImageAnnotations(img gcrv1.Image, tag string) gcrv1.Image { func TestOCIContentConfigChanged(t *testing.T) { tests := []struct { name string - spec ociv1.OCIRepositorySpec - status ociv1.OCIRepositoryStatus + spec sourcev1.OCIRepositorySpec + status sourcev1.OCIRepositoryStatus want bool }{ { 
name: "same ignore, no layer selector", - spec: ociv1.OCIRepositorySpec{ + spec: sourcev1.OCIRepositorySpec{ Ignore: ptr.To("nnn"), }, - status: ociv1.OCIRepositoryStatus{ + status: sourcev1.OCIRepositoryStatus{ ObservedIgnore: ptr.To("nnn"), }, want: false, }, { name: "different ignore, no layer selector", - spec: ociv1.OCIRepositorySpec{ + spec: sourcev1.OCIRepositorySpec{ Ignore: ptr.To("nnn"), }, - status: ociv1.OCIRepositoryStatus{ + status: sourcev1.OCIRepositoryStatus{ ObservedIgnore: ptr.To("mmm"), }, want: true, }, { name: "same ignore, same layer selector", - spec: ociv1.OCIRepositorySpec{ + spec: sourcev1.OCIRepositorySpec{ Ignore: ptr.To("nnn"), - LayerSelector: &ociv1.OCILayerSelector{ + LayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, - status: ociv1.OCIRepositoryStatus{ + status: sourcev1.OCIRepositoryStatus{ ObservedIgnore: ptr.To("nnn"), - ObservedLayerSelector: &ociv1.OCILayerSelector{ + ObservedLayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, want: false, }, { name: "same ignore, different layer selector operation", - spec: ociv1.OCIRepositorySpec{ + spec: sourcev1.OCIRepositorySpec{ Ignore: ptr.To("nnn"), - LayerSelector: &ociv1.OCILayerSelector{ + LayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerCopy, + Operation: sourcev1.OCILayerCopy, }, }, - status: ociv1.OCIRepositoryStatus{ + status: sourcev1.OCIRepositoryStatus{ ObservedIgnore: ptr.To("nnn"), - ObservedLayerSelector: &ociv1.OCILayerSelector{ + ObservedLayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, want: true, }, { name: "same ignore, different layer selector mediatype", - spec: ociv1.OCIRepositorySpec{ + spec: sourcev1.OCIRepositorySpec{ Ignore: ptr.To("nnn"), - LayerSelector: &ociv1.OCILayerSelector{ + LayerSelector: &sourcev1.OCILayerSelector{ MediaType: "bar", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, - status: ociv1.OCIRepositoryStatus{ + status: sourcev1.OCIRepositoryStatus{ ObservedIgnore: ptr.To("nnn"), - ObservedLayerSelector: &ociv1.OCILayerSelector{ + ObservedLayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, want: true, }, { name: "no ignore, same layer selector", - spec: ociv1.OCIRepositorySpec{ - LayerSelector: &ociv1.OCILayerSelector{ + spec: sourcev1.OCIRepositorySpec{ + LayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, - status: ociv1.OCIRepositoryStatus{ - ObservedLayerSelector: &ociv1.OCILayerSelector{ + status: sourcev1.OCIRepositoryStatus{ + ObservedLayerSelector: &sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, want: false, }, { name: "no ignore, different layer selector", - spec: ociv1.OCIRepositorySpec{ - LayerSelector: &ociv1.OCILayerSelector{ + spec: sourcev1.OCIRepositorySpec{ + LayerSelector: &sourcev1.OCILayerSelector{ MediaType: "bar", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, - status: ociv1.OCIRepositoryStatus{ - ObservedLayerSelector: &ociv1.OCILayerSelector{ + status: sourcev1.OCIRepositoryStatus{ + ObservedLayerSelector: 
&sourcev1.OCILayerSelector{ MediaType: "foo", - Operation: ociv1.OCILayerExtract, + Operation: sourcev1.OCILayerExtract, }, }, want: true, @@ -3629,7 +3698,7 @@ func TestOCIContentConfigChanged(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - obj := &ociv1.OCIRepository{ + obj := &sourcev1.OCIRepository{ Spec: tt.spec, Status: tt.status, } @@ -3638,188 +3707,3 @@ func TestOCIContentConfigChanged(t *testing.T) { }) } } - -func TestOCIRepositoryReconciler_getProxyURL(t *testing.T) { - tests := []struct { - name string - ociRepo *ociv1.OCIRepository - objects []client.Object - expectedURL string - expectedErr string - }{ - { - name: "empty proxySecretRef", - ociRepo: &ociv1.OCIRepository{ - Spec: ociv1.OCIRepositorySpec{ - ProxySecretRef: nil, - }, - }, - }, - { - name: "non-existing proxySecretRef", - ociRepo: &ociv1.OCIRepository{ - Spec: ociv1.OCIRepositorySpec{ - ProxySecretRef: &meta.LocalObjectReference{ - Name: "non-existing", - }, - }, - }, - expectedErr: "secrets \"non-existing\" not found", - }, - { - name: "missing address in proxySecretRef", - ociRepo: &ociv1.OCIRepository{ - Spec: ociv1.OCIRepositorySpec{ - ProxySecretRef: &meta.LocalObjectReference{ - Name: "dummy", - }, - }, - }, - objects: []client.Object{ - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - Data: map[string][]byte{}, - }, - }, - expectedErr: "invalid proxy secret '/dummy': key 'address' is missing", - }, - { - name: "invalid address in proxySecretRef", - ociRepo: &ociv1.OCIRepository{ - Spec: ociv1.OCIRepositorySpec{ - ProxySecretRef: &meta.LocalObjectReference{ - Name: "dummy", - }, - }, - }, - objects: []client.Object{ - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - Data: map[string][]byte{ - "address": {0x7f}, - }, - }, - }, - expectedErr: "failed to parse proxy address '\x7f': parse \"\\x7f\": net/url: invalid control character in URL", - }, - { - name: "no user, no password", - ociRepo: &ociv1.OCIRepository{ - Spec: ociv1.OCIRepositorySpec{ - ProxySecretRef: &meta.LocalObjectReference{ - Name: "dummy", - }, - }, - }, - objects: []client.Object{ - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - Data: map[string][]byte{ - "address": []byte("http://proxy.example.com"), - }, - }, - }, - expectedURL: "http://proxy.example.com", - }, - { - name: "user, no password", - ociRepo: &ociv1.OCIRepository{ - Spec: ociv1.OCIRepositorySpec{ - ProxySecretRef: &meta.LocalObjectReference{ - Name: "dummy", - }, - }, - }, - objects: []client.Object{ - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - Data: map[string][]byte{ - "address": []byte("http://proxy.example.com"), - "username": []byte("user"), - }, - }, - }, - expectedURL: "http://user:@proxy.example.com", - }, - { - name: "no user, password", - ociRepo: &ociv1.OCIRepository{ - Spec: ociv1.OCIRepositorySpec{ - ProxySecretRef: &meta.LocalObjectReference{ - Name: "dummy", - }, - }, - }, - objects: []client.Object{ - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - Data: map[string][]byte{ - "address": []byte("http://proxy.example.com"), - "password": []byte("password"), - }, - }, - }, - expectedURL: "http://:password@proxy.example.com", - }, - { - name: "user, password", - ociRepo: &ociv1.OCIRepository{ - Spec: ociv1.OCIRepositorySpec{ - ProxySecretRef: &meta.LocalObjectReference{ - Name: "dummy", - }, - }, - }, - objects: []client.Object{ - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "dummy", - }, - Data: 
map[string][]byte{ - "address": []byte("http://proxy.example.com"), - "username": []byte("user"), - "password": []byte("password"), - }, - }, - }, - expectedURL: "http://user:password@proxy.example.com", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - c := fakeclient.NewClientBuilder(). - WithScheme(testEnv.Scheme()). - WithObjects(tt.objects...). - Build() - - r := &OCIRepositoryReconciler{ - Client: c, - } - - u, err := r.getProxyURL(ctx, tt.ociRepo) - if tt.expectedErr == "" { - g.Expect(err).To(BeNil()) - } else { - g.Expect(err.Error()).To(ContainSubstring(tt.expectedErr)) - } - if tt.expectedURL == "" { - g.Expect(u).To(BeNil()) - } else { - g.Expect(u.String()).To(Equal(tt.expectedURL)) - } - }) - } -} diff --git a/internal/controller/storage.go b/internal/controller/storage.go deleted file mode 100644 index af4b79a70..000000000 --- a/internal/controller/storage.go +++ /dev/null @@ -1,719 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "archive/tar" - "compress/gzip" - "context" - "fmt" - "io" - "io/fs" - "net/url" - "os" - "path/filepath" - "sort" - "strings" - "time" - - securejoin "github.com/cyphar/filepath-securejoin" - "github.com/go-git/go-git/v5/plumbing/format/gitignore" - "github.com/opencontainers/go-digest" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kerrors "k8s.io/apimachinery/pkg/util/errors" - - "github.com/fluxcd/pkg/lockedfile" - "github.com/fluxcd/pkg/sourceignore" - pkgtar "github.com/fluxcd/pkg/tar" - - v1 "github.com/fluxcd/source-controller/api/v1" - intdigest "github.com/fluxcd/source-controller/internal/digest" - sourcefs "github.com/fluxcd/source-controller/internal/fs" -) - -const GarbageCountLimit = 1000 - -const ( - // defaultFileMode is the permission mode applied to files inside an artifact archive. - defaultFileMode int64 = 0o600 - // defaultDirMode is the permission mode applied to all directories inside an artifact archive. - defaultDirMode int64 = 0o750 - // defaultExeFileMode is the permission mode applied to executable files inside an artifact archive. - defaultExeFileMode int64 = 0o700 -) - -// Storage manages artifacts -type Storage struct { - // BasePath is the local directory path where the source artifacts are stored. - BasePath string `json:"basePath"` - - // Hostname is the file server host name used to compose the artifacts URIs. - Hostname string `json:"hostname"` - - // ArtifactRetentionTTL is the duration of time that artifacts will be kept - // in storage before being garbage collected. - ArtifactRetentionTTL time.Duration `json:"artifactRetentionTTL"` - - // ArtifactRetentionRecords is the maximum number of artifacts to be kept in - // storage after a garbage collection. 
- ArtifactRetentionRecords int `json:"artifactRetentionRecords"` -} - -// NewStorage creates the storage helper for a given path and hostname. -func NewStorage(basePath string, hostname string, artifactRetentionTTL time.Duration, artifactRetentionRecords int) (*Storage, error) { - if f, err := os.Stat(basePath); os.IsNotExist(err) || !f.IsDir() { - return nil, fmt.Errorf("invalid dir path: %s", basePath) - } - return &Storage{ - BasePath: basePath, - Hostname: hostname, - ArtifactRetentionTTL: artifactRetentionTTL, - ArtifactRetentionRecords: artifactRetentionRecords, - }, nil -} - -// NewArtifactFor returns a new v1.Artifact. -func (s Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, fileName string) v1.Artifact { - path := v1.ArtifactPath(kind, metadata.GetNamespace(), metadata.GetName(), fileName) - artifact := v1.Artifact{ - Path: path, - Revision: revision, - } - s.SetArtifactURL(&artifact) - return artifact -} - -// SetArtifactURL sets the URL on the given v1.Artifact. -func (s Storage) SetArtifactURL(artifact *v1.Artifact) { - if artifact.Path == "" { - return - } - format := "http://%s/%s" - if strings.HasPrefix(s.Hostname, "http://") || strings.HasPrefix(s.Hostname, "https://") { - format = "%s/%s" - } - artifact.URL = fmt.Sprintf(format, s.Hostname, strings.TrimLeft(artifact.Path, "/")) -} - -// SetHostname sets the hostname of the given URL string to the current Storage.Hostname and returns the result. -func (s Storage) SetHostname(URL string) string { - u, err := url.Parse(URL) - if err != nil { - return "" - } - u.Host = s.Hostname - return u.String() -} - -// MkdirAll calls os.MkdirAll for the given v1.Artifact base dir. -func (s Storage) MkdirAll(artifact v1.Artifact) error { - dir := filepath.Dir(s.LocalPath(artifact)) - return os.MkdirAll(dir, 0o700) -} - -// Remove calls os.Remove for the given v1.Artifact path. -func (s Storage) Remove(artifact v1.Artifact) error { - return os.Remove(s.LocalPath(artifact)) -} - -// RemoveAll calls os.RemoveAll for the given v1.Artifact base dir. -func (s Storage) RemoveAll(artifact v1.Artifact) (string, error) { - var deletedDir string - dir := filepath.Dir(s.LocalPath(artifact)) - // Check if the dir exists. - _, err := os.Stat(dir) - if err == nil { - deletedDir = dir - } - return deletedDir, os.RemoveAll(dir) -} - -// RemoveAllButCurrent removes all files for the given v1.Artifact base dir, excluding the current one. -func (s Storage) RemoveAllButCurrent(artifact v1.Artifact) ([]string, error) { - deletedFiles := []string{} - localPath := s.LocalPath(artifact) - dir := filepath.Dir(localPath) - var errors []string - _ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - errors = append(errors, err.Error()) - return nil - } - - if path != localPath && !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink { - if err := os.Remove(path); err != nil { - errors = append(errors, info.Name()) - } else { - // Collect the successfully deleted file paths. - deletedFiles = append(deletedFiles, path) - } - } - return nil - }) - - if len(errors) > 0 { - return deletedFiles, fmt.Errorf("failed to remove files: %s", strings.Join(errors, " ")) - } - return deletedFiles, nil -} - -// getGarbageFiles returns all files that need to be garbage collected for the given artifact. -// Garbage files are determined based on the below flow: -// 1. collect all artifact files with an expired ttl -// 2. if we satisfy maxItemsToBeRetained, then return -// 3. 
else, collect all artifact files till the latest n files remain, where n=maxItemsToBeRetained -func (s Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItemsToBeRetained int, ttl time.Duration) (garbageFiles []string, _ error) { - localPath := s.LocalPath(artifact) - dir := filepath.Dir(localPath) - artifactFilesWithCreatedTs := make(map[time.Time]string) - // sortedPaths contain all files sorted according to their created ts. - sortedPaths := []string{} - now := time.Now().UTC() - totalArtifactFiles := 0 - var errors []string - creationTimestamps := []time.Time{} - _ = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { - if err != nil { - errors = append(errors, err.Error()) - return nil - } - if totalArtifactFiles >= totalCountLimit { - return fmt.Errorf("reached file walking limit, already walked over: %d", totalArtifactFiles) - } - info, err := d.Info() - if err != nil { - errors = append(errors, err.Error()) - return nil - } - createdAt := info.ModTime().UTC() - diff := now.Sub(createdAt) - // Compare the time difference between now and the time at which the file was created - // with the provided TTL. Delete if the difference is greater than the TTL. Since the - // below logic just deals with determining if an artifact needs to be garbage collected, - // we avoid all lock files, adding them at the end to the list of garbage files. - expired := diff > ttl - if !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink && filepath.Ext(path) != ".lock" { - if path != localPath && expired { - garbageFiles = append(garbageFiles, path) - } - totalArtifactFiles += 1 - artifactFilesWithCreatedTs[createdAt] = path - creationTimestamps = append(creationTimestamps, createdAt) - } - return nil - - }) - if len(errors) > 0 { - return nil, fmt.Errorf("can't walk over file: %s", strings.Join(errors, ",")) - } - - // We already collected enough garbage files to satisfy the no. of max - // items that are supposed to be retained, so exit early. - if totalArtifactFiles-len(garbageFiles) < maxItemsToBeRetained { - return garbageFiles, nil - } - - // sort all timestamps in ascending order. - sort.Slice(creationTimestamps, func(i, j int) bool { return creationTimestamps[i].Before(creationTimestamps[j]) }) - for _, ts := range creationTimestamps { - path, ok := artifactFilesWithCreatedTs[ts] - if !ok { - return garbageFiles, fmt.Errorf("failed to fetch file for created ts: %v", ts) - } - sortedPaths = append(sortedPaths, path) - } - - var collected int - noOfGarbageFiles := len(garbageFiles) - for _, path := range sortedPaths { - if path != localPath && filepath.Ext(path) != ".lock" && !stringInSlice(path, garbageFiles) { - // If we previously collected some garbage files with an expired ttl, then take that into account - // when checking whether we need to remove more files to satisfy the max no. of items allowed - // in the filesystem, along with the no. of files already removed in this loop. - if noOfGarbageFiles > 0 { - if (len(sortedPaths) - collected - len(garbageFiles)) > maxItemsToBeRetained { - garbageFiles = append(garbageFiles, path) - collected += 1 - } - } else { - if len(sortedPaths)-collected > maxItemsToBeRetained { - garbageFiles = append(garbageFiles, path) - collected += 1 - } - } - } - } - - return garbageFiles, nil -} - -// GarbageCollect removes all garbage files in the artifact dir according to the provided -// retention options. 
-func (s Storage) GarbageCollect(ctx context.Context, artifact v1.Artifact, timeout time.Duration) ([]string, error) { - delFilesChan := make(chan []string) - errChan := make(chan error) - // Abort if it takes more than the provided timeout duration. - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - go func() { - garbageFiles, err := s.getGarbageFiles(artifact, GarbageCountLimit, s.ArtifactRetentionRecords, s.ArtifactRetentionTTL) - if err != nil { - errChan <- err - return - } - var errors []error - var deleted []string - if len(garbageFiles) > 0 { - for _, file := range garbageFiles { - err := os.Remove(file) - if err != nil { - errors = append(errors, err) - } else { - deleted = append(deleted, file) - } - // If a lock file exists for this garbage artifact, remove that too. - lockFile := file + ".lock" - if _, err = os.Lstat(lockFile); err == nil { - err = os.Remove(lockFile) - if err != nil { - errors = append(errors, err) - } - } - } - } - if len(errors) > 0 { - errChan <- kerrors.NewAggregate(errors) - return - } - delFilesChan <- deleted - }() - - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case delFiles := <-delFilesChan: - return delFiles, nil - case err := <-errChan: - return nil, err - } - } -} - -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - -// ArtifactExist returns a boolean indicating whether the v1.Artifact exists in storage and is a regular file. -func (s Storage) ArtifactExist(artifact v1.Artifact) bool { - fi, err := os.Lstat(s.LocalPath(artifact)) - if err != nil { - return false - } - return fi.Mode().IsRegular() -} - -// VerifyArtifact verifies if the Digest of the v1.Artifact matches the digest -// of the file in Storage. It returns an error if the digests don't match, or -// if it can't be verified. -func (s Storage) VerifyArtifact(artifact v1.Artifact) error { - if artifact.Digest == "" { - return fmt.Errorf("artifact has no digest") - } - - d, err := digest.Parse(artifact.Digest) - if err != nil { - return fmt.Errorf("failed to parse artifact digest '%s': %w", artifact.Digest, err) - } - - f, err := os.Open(s.LocalPath(artifact)) - if err != nil { - return err - } - defer f.Close() - - verifier := d.Verifier() - if _, err = io.Copy(verifier, f); err != nil { - return err - } - if !verifier.Verified() { - return fmt.Errorf("computed digest doesn't match '%s'", d.String()) - } - return nil -} - -// ArchiveFileFilter must return true if a file should not be included in the archive after inspecting the given path -// and/or os.FileInfo. -type ArchiveFileFilter func(p string, fi os.FileInfo) bool - -// SourceIgnoreFilter returns an ArchiveFileFilter that filters out files matching sourceignore.VCSPatterns and any of -// the provided patterns. -// If an empty gitignore.Pattern slice is given, the matcher is set to sourceignore.NewDefaultMatcher. -func SourceIgnoreFilter(ps []gitignore.Pattern, domain []string) ArchiveFileFilter { - matcher := sourceignore.NewDefaultMatcher(ps, domain) - if len(ps) > 0 { - ps = append(sourceignore.VCSPatterns(domain), ps...) - matcher = sourceignore.NewMatcher(ps) - } - return func(p string, fi os.FileInfo) bool { - return matcher.Match(strings.Split(p, string(filepath.Separator)), fi.IsDir()) - } -} - -// Archive atomically archives the given directory as a tarball to the given v1.Artifact path, excluding -// directories and any ArchiveFileFilter matches. 
While archiving, any environment specific data (for example, -// the user and group name) is stripped from file headers. -// If successful, it sets the digest and last update time on the artifact. -func (s Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFilter) (err error) { - if f, err := os.Stat(dir); os.IsNotExist(err) || !f.IsDir() { - return fmt.Errorf("invalid dir path: %s", dir) - } - - localPath := s.LocalPath(*artifact) - tf, err := os.CreateTemp(filepath.Split(localPath)) - if err != nil { - return err - } - tmpName := tf.Name() - defer func() { - if err != nil { - os.Remove(tmpName) - } - }() - - d := intdigest.Canonical.Digester() - sz := &writeCounter{} - mw := io.MultiWriter(d.Hash(), tf, sz) - - gw := gzip.NewWriter(mw) - tw := tar.NewWriter(gw) - if err := filepath.Walk(dir, func(p string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - // Ignore anything that is not a file or directories e.g. symlinks - if m := fi.Mode(); !(m.IsRegular() || m.IsDir()) { - return nil - } - - // Skip filtered files - if filter != nil && filter(p, fi) { - return nil - } - - header, err := tar.FileInfoHeader(fi, p) - if err != nil { - return err - } - - // The name needs to be modified to maintain directory structure - // as tar.FileInfoHeader only has access to the base name of the file. - // Ref: https://golang.org/src/archive/tar/common.go?#L626 - relFilePath := p - if filepath.IsAbs(dir) { - relFilePath, err = filepath.Rel(dir, p) - if err != nil { - return err - } - } - sanitizeHeader(relFilePath, header) - - if err := tw.WriteHeader(header); err != nil { - return err - } - - if !fi.Mode().IsRegular() { - return nil - } - f, err := os.Open(p) - if err != nil { - f.Close() - return err - } - if _, err := io.Copy(tw, f); err != nil { - f.Close() - return err - } - return f.Close() - }); err != nil { - tw.Close() - gw.Close() - tf.Close() - return err - } - - if err := tw.Close(); err != nil { - gw.Close() - tf.Close() - return err - } - if err := gw.Close(); err != nil { - tf.Close() - return err - } - if err := tf.Close(); err != nil { - return err - } - - if err := os.Chmod(tmpName, 0o600); err != nil { - return err - } - - if err := sourcefs.RenameWithFallback(tmpName, localPath); err != nil { - return err - } - - artifact.Digest = d.Digest().String() - artifact.LastUpdateTime = metav1.Now() - artifact.Size = &sz.written - - return nil -} - -// AtomicWriteFile atomically writes the io.Reader contents to the v1.Artifact path. -// If successful, it sets the digest and last update time on the artifact. -func (s Storage) AtomicWriteFile(artifact *v1.Artifact, reader io.Reader, mode os.FileMode) (err error) { - localPath := s.LocalPath(*artifact) - tf, err := os.CreateTemp(filepath.Split(localPath)) - if err != nil { - return err - } - tfName := tf.Name() - defer func() { - if err != nil { - os.Remove(tfName) - } - }() - - d := intdigest.Canonical.Digester() - sz := &writeCounter{} - mw := io.MultiWriter(tf, d.Hash(), sz) - - if _, err := io.Copy(mw, reader); err != nil { - tf.Close() - return err - } - if err := tf.Close(); err != nil { - return err - } - - if err := os.Chmod(tfName, mode); err != nil { - return err - } - - if err := sourcefs.RenameWithFallback(tfName, localPath); err != nil { - return err - } - - artifact.Digest = d.Digest().String() - artifact.LastUpdateTime = metav1.Now() - artifact.Size = &sz.written - - return nil -} - -// Copy atomically copies the io.Reader contents to the v1.Artifact path. 
-// If successful, it sets the digest and last update time on the artifact. -func (s Storage) Copy(artifact *v1.Artifact, reader io.Reader) (err error) { - localPath := s.LocalPath(*artifact) - tf, err := os.CreateTemp(filepath.Split(localPath)) - if err != nil { - return err - } - tfName := tf.Name() - defer func() { - if err != nil { - os.Remove(tfName) - } - }() - - d := intdigest.Canonical.Digester() - sz := &writeCounter{} - mw := io.MultiWriter(tf, d.Hash(), sz) - - if _, err := io.Copy(mw, reader); err != nil { - tf.Close() - return err - } - if err := tf.Close(); err != nil { - return err - } - - if err := sourcefs.RenameWithFallback(tfName, localPath); err != nil { - return err - } - - artifact.Digest = d.Digest().String() - artifact.LastUpdateTime = metav1.Now() - artifact.Size = &sz.written - - return nil -} - -// CopyFromPath atomically copies the contents of the given path to the path of the v1.Artifact. -// If successful, the digest and last update time on the artifact is set. -func (s Storage) CopyFromPath(artifact *v1.Artifact, path string) (err error) { - f, err := os.Open(path) - if err != nil { - return err - } - defer func() { - if cerr := f.Close(); cerr != nil && err == nil { - err = cerr - } - }() - err = s.Copy(artifact, f) - return err -} - -// CopyToPath copies the contents in the (sub)path of the given artifact to the given path. -func (s Storage) CopyToPath(artifact *v1.Artifact, subPath, toPath string) error { - // create a tmp directory to store artifact - tmp, err := os.MkdirTemp("", "flux-include-") - if err != nil { - return err - } - defer os.RemoveAll(tmp) - - // read artifact file content - localPath := s.LocalPath(*artifact) - f, err := os.Open(localPath) - if err != nil { - return err - } - defer f.Close() - - // untar the artifact - untarPath := filepath.Join(tmp, "unpack") - if err = pkgtar.Untar(f, untarPath, pkgtar.WithMaxUntarSize(-1)); err != nil { - return err - } - - // create the destination parent dir - if err = os.MkdirAll(filepath.Dir(toPath), os.ModePerm); err != nil { - return err - } - - // copy the artifact content to the destination dir - fromPath, err := securejoin.SecureJoin(untarPath, subPath) - if err != nil { - return err - } - if err := sourcefs.RenameWithFallback(fromPath, toPath); err != nil { - return err - } - return nil -} - -// Symlink creates or updates a symbolic link for the given v1.Artifact and returns the URL for the symlink. -func (s Storage) Symlink(artifact v1.Artifact, linkName string) (string, error) { - localPath := s.LocalPath(artifact) - dir := filepath.Dir(localPath) - link := filepath.Join(dir, linkName) - tmpLink := link + ".tmp" - - if err := os.Remove(tmpLink); err != nil && !os.IsNotExist(err) { - return "", err - } - - if err := os.Symlink(localPath, tmpLink); err != nil { - return "", err - } - - if err := os.Rename(tmpLink, link); err != nil { - return "", err - } - - return fmt.Sprintf("http://%s/%s", s.Hostname, filepath.Join(filepath.Dir(artifact.Path), linkName)), nil -} - -// Lock creates a file lock for the given v1.Artifact. -func (s Storage) Lock(artifact v1.Artifact) (unlock func(), err error) { - lockFile := s.LocalPath(artifact) + ".lock" - mutex := lockedfile.MutexAt(lockFile) - return mutex.Lock() -} - -// LocalPath returns the secure local path of the given artifact (that is: relative to the Storage.BasePath). 
-func (s Storage) LocalPath(artifact v1.Artifact) string { - if artifact.Path == "" { - return "" - } - path, err := securejoin.SecureJoin(s.BasePath, artifact.Path) - if err != nil { - return "" - } - return path -} - -// writeCounter is an implementation of io.Writer that only records the number -// of bytes written. -type writeCounter struct { - written int64 -} - -func (wc *writeCounter) Write(p []byte) (int, error) { - n := len(p) - wc.written += int64(n) - return n, nil -} - -// sanitizeHeader modifies the tar.Header to be relative to the root of the -// archive and removes any environment specific data. -func sanitizeHeader(relP string, h *tar.Header) { - // Modify the name to be relative to the root of the archive, - // this ensures we maintain the same structure when extracting. - h.Name = relP - - // We want to remove any environment specific data as well, this - // ensures the checksum is purely content based. - h.Gid = 0 - h.Uid = 0 - h.Uname = "" - h.Gname = "" - h.ModTime = time.Time{} - h.AccessTime = time.Time{} - h.ChangeTime = time.Time{} - - // Override the mode to be the default for the type of file. - setDefaultMode(h) -} - -// setDefaultMode sets the default mode for the given header. -func setDefaultMode(h *tar.Header) { - if h.FileInfo().IsDir() { - h.Mode = defaultDirMode - return - } - - if h.FileInfo().Mode().IsRegular() { - mode := h.FileInfo().Mode() - if mode&os.ModeType == 0 && mode&0o111 != 0 { - h.Mode = defaultExeFileMode - return - } - h.Mode = defaultFileMode - return - } -} diff --git a/internal/controller/storage_test.go b/internal/controller/storage_test.go deleted file mode 100644 index 1b65ce914..000000000 --- a/internal/controller/storage_test.go +++ /dev/null @@ -1,853 +0,0 @@ -/* -Copyright 2020, 2021 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "context" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/gitignore" - . "github.com/onsi/gomega" - - sourcev1 "github.com/fluxcd/source-controller/api/v1" -) - -func TestStorageConstructor(t *testing.T) { - dir := t.TempDir() - - if _, err := NewStorage("/nonexistent", "hostname", time.Minute, 2); err == nil { - t.Fatal("nonexistent path was allowable in storage constructor") - } - - f, err := os.CreateTemp(dir, "") - if err != nil { - t.Fatalf("while creating temporary file: %v", err) - } - f.Close() - - if _, err := NewStorage(f.Name(), "hostname", time.Minute, 2); err == nil { - os.Remove(f.Name()) - t.Fatal("file path was accepted as basedir") - } - os.Remove(f.Name()) - - if _, err := NewStorage(dir, "hostname", time.Minute, 2); err != nil { - t.Fatalf("Valid path did not successfully return: %v", err) - } -} - -// walks a tar.gz and looks for paths with the basename. It does not match -// symlinks properly at this time because that's painful. 
-func walkTar(tarFile string, match string, dir bool) (int64, int64, bool, error) { - f, err := os.Open(tarFile) - if err != nil { - return 0, 0, false, fmt.Errorf("could not open file: %w", err) - } - defer f.Close() - - gzr, err := gzip.NewReader(f) - if err != nil { - return 0, 0, false, fmt.Errorf("could not unzip file: %w", err) - } - defer gzr.Close() - - tr := tar.NewReader(gzr) - for { - header, err := tr.Next() - if err == io.EOF { - break - } else if err != nil { - return 0, 0, false, fmt.Errorf("corrupt tarball reading header: %w", err) - } - - switch header.Typeflag { - case tar.TypeDir: - if header.Name == match && dir { - return 0, header.Mode, true, nil - } - case tar.TypeReg: - if header.Name == match { - return header.Size, header.Mode, true, nil - } - default: - // skip - } - } - - return 0, 0, false, nil -} - -func TestStorage_Archive(t *testing.T) { - dir := t.TempDir() - - storage, err := NewStorage(dir, "hostname", time.Minute, 2) - if err != nil { - t.Fatalf("error while bootstrapping storage: %v", err) - } - - type dummyFile struct { - content []byte - mode int64 - } - - createFiles := func(files map[string]dummyFile) (dir string, err error) { - dir = t.TempDir() - for name, df := range files { - absPath := filepath.Join(dir, name) - if err = os.MkdirAll(filepath.Dir(absPath), 0o750); err != nil { - return - } - f, err := os.Create(absPath) - if err != nil { - return "", fmt.Errorf("could not create file %q: %w", absPath, err) - } - if n, err := f.Write(df.content); err != nil { - f.Close() - return "", fmt.Errorf("could not write %d bytes to file %q: %w", n, f.Name(), err) - } - f.Close() - - if df.mode != 0 { - if err = os.Chmod(absPath, os.FileMode(df.mode)); err != nil { - return "", fmt.Errorf("could not chmod file %q: %w", absPath, err) - } - } - } - return - } - - matchFiles := func(t *testing.T, storage *Storage, artifact sourcev1.Artifact, files map[string]dummyFile, dirs []string) { - t.Helper() - for name, df := range files { - mustExist := !(name[0:1] == "!") - if !mustExist { - name = name[1:] - } - s, m, exist, err := walkTar(storage.LocalPath(artifact), name, false) - if err != nil { - t.Fatalf("failed reading tarball: %v", err) - } - if bs := int64(len(df.content)); s != bs { - t.Fatalf("%q size %v != %v", name, s, bs) - } - if exist != mustExist { - if mustExist { - t.Errorf("could not find file %q in tarball", name) - } else { - t.Errorf("tarball contained excluded file %q", name) - } - } - expectMode := df.mode - if expectMode == 0 { - expectMode = defaultFileMode - } - if exist && m != expectMode { - t.Fatalf("%q mode %v != %v", name, m, expectMode) - } - } - for _, name := range dirs { - mustExist := !(name[0:1] == "!") - if !mustExist { - name = name[1:] - } - _, m, exist, err := walkTar(storage.LocalPath(artifact), name, true) - if err != nil { - t.Fatalf("failed reading tarball: %v", err) - } - if exist != mustExist { - if mustExist { - t.Errorf("could not find dir %q in tarball", name) - } else { - t.Errorf("tarball contained excluded file %q", name) - } - } - if exist && m != defaultDirMode { - t.Fatalf("%q mode %v != %v", name, m, defaultDirMode) - } - - } - } - - tests := []struct { - name string - files map[string]dummyFile - filter ArchiveFileFilter - want map[string]dummyFile - wantDirs []string - wantErr bool - }{ - { - name: "no filter", - files: map[string]dummyFile{ - ".git/config": {}, - "file.jpg": {content: []byte(`contents`)}, - "manifest.yaml": {}, - }, - filter: nil, - want: map[string]dummyFile{ - ".git/config": {}, - 
"file.jpg": {content: []byte(`contents`)}, - "manifest.yaml": {}, - }, - }, - { - name: "exclude VCS", - files: map[string]dummyFile{ - ".git/config": {}, - "manifest.yaml": {}, - }, - wantDirs: []string{ - "!.git", - }, - filter: SourceIgnoreFilter(nil, nil), - want: map[string]dummyFile{ - "!.git/config": {}, - "manifest.yaml": {}, - }, - }, - { - name: "custom", - files: map[string]dummyFile{ - ".git/config": {}, - "custom": {}, - "horse.jpg": {}, - }, - filter: SourceIgnoreFilter([]gitignore.Pattern{ - gitignore.ParsePattern("custom", nil), - }, nil), - want: map[string]dummyFile{ - "!git/config": {}, - "!custom": {}, - "horse.jpg": {}, - }, - wantErr: false, - }, - { - name: "including directories", - files: map[string]dummyFile{ - "test/.gitkeep": {}, - }, - filter: SourceIgnoreFilter([]gitignore.Pattern{ - gitignore.ParsePattern("custom", nil), - }, nil), - wantDirs: []string{ - "test", - }, - wantErr: false, - }, - { - name: "sets default file modes", - files: map[string]dummyFile{ - "test/file": { - mode: 0o666, - }, - "test/executable": { - mode: 0o777, - }, - }, - want: map[string]dummyFile{ - "test/file": { - mode: defaultFileMode, - }, - "test/executable": { - mode: defaultExeFileMode, - }, - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - dir, err := createFiles(tt.files) - if err != nil { - t.Error(err) - return - } - defer os.RemoveAll(dir) - artifact := sourcev1.Artifact{ - Path: filepath.Join(randStringRunes(10), randStringRunes(10), randStringRunes(10)+".tar.gz"), - } - if err := storage.MkdirAll(artifact); err != nil { - t.Fatalf("artifact directory creation failed: %v", err) - } - if err := storage.Archive(&artifact, dir, tt.filter); (err != nil) != tt.wantErr { - t.Errorf("Archive() error = %v, wantErr %v", err, tt.wantErr) - } - matchFiles(t, storage, artifact, tt.want, tt.wantDirs) - }) - } -} - -func TestStorage_Remove(t *testing.T) { - t.Run("removes file", func(t *testing.T) { - g := NewWithT(t) - - dir := t.TempDir() - - s, err := NewStorage(dir, "", 0, 0) - g.Expect(err).ToNot(HaveOccurred()) - - artifact := sourcev1.Artifact{ - Path: filepath.Join(dir, "test.txt"), - } - g.Expect(s.MkdirAll(artifact)).To(Succeed()) - g.Expect(s.AtomicWriteFile(&artifact, bytes.NewReader([]byte("test")), 0o600)).To(Succeed()) - g.Expect(s.ArtifactExist(artifact)).To(BeTrue()) - - g.Expect(s.Remove(artifact)).To(Succeed()) - g.Expect(s.ArtifactExist(artifact)).To(BeFalse()) - }) - - t.Run("error if file does not exist", func(t *testing.T) { - g := NewWithT(t) - - dir := t.TempDir() - - s, err := NewStorage(dir, "", 0, 0) - g.Expect(err).ToNot(HaveOccurred()) - - artifact := sourcev1.Artifact{ - Path: filepath.Join(dir, "test.txt"), - } - - err = s.Remove(artifact) - g.Expect(err).To(HaveOccurred()) - g.Expect(errors.Is(err, os.ErrNotExist)).To(BeTrue()) - }) -} - -func TestStorageRemoveAllButCurrent(t *testing.T) { - t.Run("bad directory in archive", func(t *testing.T) { - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", time.Minute, 2) - if err != nil { - t.Fatalf("Valid path did not successfully return: %v", err) - } - - if _, err := s.RemoveAllButCurrent(sourcev1.Artifact{Path: filepath.Join(dir, "really", "nonexistent")}); err == nil { - t.Fatal("Did not error while pruning non-existent path") - } - }) - - t.Run("collect names of deleted items", func(t *testing.T) { - g := NewWithT(t) - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", time.Minute, 2) - g.Expect(err).ToNot(HaveOccurred(), "failed to 
create new storage") - - artifact := sourcev1.Artifact{ - Path: filepath.Join("foo", "bar", "artifact1.tar.gz"), - } - - // Create artifact dir and artifacts. - artifactDir := filepath.Join(dir, "foo", "bar") - g.Expect(os.MkdirAll(artifactDir, 0o750)).NotTo(HaveOccurred()) - current := []string{ - filepath.Join(artifactDir, "artifact1.tar.gz"), - } - wantDeleted := []string{ - filepath.Join(artifactDir, "file1.txt"), - filepath.Join(artifactDir, "file2.txt"), - } - createFile := func(files []string) { - for _, c := range files { - f, err := os.Create(c) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(f.Close()).ToNot(HaveOccurred()) - } - } - createFile(current) - createFile(wantDeleted) - _, err = s.Symlink(artifact, "latest.tar.gz") - g.Expect(err).ToNot(HaveOccurred(), "failed to create symlink") - - deleted, err := s.RemoveAllButCurrent(artifact) - g.Expect(err).ToNot(HaveOccurred(), "failed to remove all but current") - g.Expect(deleted).To(Equal(wantDeleted)) - }) -} - -func TestStorageRemoveAll(t *testing.T) { - tests := []struct { - name string - artifactPath string - createArtifactPath bool - wantDeleted string - }{ - { - name: "delete non-existent path", - artifactPath: filepath.Join("foo", "bar", "artifact1.tar.gz"), - createArtifactPath: false, - wantDeleted: "", - }, - { - name: "delete existing path", - artifactPath: filepath.Join("foo", "bar", "artifact1.tar.gz"), - createArtifactPath: true, - wantDeleted: filepath.Join("foo", "bar"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", time.Minute, 2) - g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") - - artifact := sourcev1.Artifact{ - Path: tt.artifactPath, - } - - if tt.createArtifactPath { - g.Expect(os.MkdirAll(filepath.Join(dir, tt.artifactPath), 0o750)).ToNot(HaveOccurred()) - } - - deleted, err := s.RemoveAll(artifact) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(deleted).To(ContainSubstring(tt.wantDeleted), "unexpected deleted path") - }) - } -} - -func TestStorageCopyFromPath(t *testing.T) { - type File struct { - Name string - Content []byte - } - - dir := t.TempDir() - - storage, err := NewStorage(dir, "hostname", time.Minute, 2) - if err != nil { - t.Fatalf("error while bootstrapping storage: %v", err) - } - - createFile := func(file *File) (absPath string, err error) { - dir = t.TempDir() - absPath = filepath.Join(dir, file.Name) - if err = os.MkdirAll(filepath.Dir(absPath), 0o750); err != nil { - return - } - f, err := os.Create(absPath) - if err != nil { - return "", fmt.Errorf("could not create file %q: %w", absPath, err) - } - if n, err := f.Write(file.Content); err != nil { - f.Close() - return "", fmt.Errorf("could not write %d bytes to file %q: %w", n, f.Name(), err) - } - f.Close() - return - } - - matchFile := func(t *testing.T, storage *Storage, artifact sourcev1.Artifact, file *File, expectMismatch bool) { - c, err := os.ReadFile(storage.LocalPath(artifact)) - if err != nil { - t.Fatalf("failed reading file: %v", err) - } - if (string(c) != string(file.Content)) != expectMismatch { - t.Errorf("artifact content does not match and not expecting mismatch, got: %q, want: %q", string(c), string(file.Content)) - } - } - - tests := []struct { - name string - file *File - want *File - expectMismatch bool - }{ - { - name: "content match", - file: &File{ - Name: "manifest.yaml", - Content: []byte(`contents`), - }, - want: &File{ - Name: "manifest.yaml", - Content: 
[]byte(`contents`), - }, - }, - { - name: "content not match", - file: &File{ - Name: "manifest.yaml", - Content: []byte(`contents`), - }, - want: &File{ - Name: "manifest.yaml", - Content: []byte(`mismatch contents`), - }, - expectMismatch: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - absPath, err := createFile(tt.file) - if err != nil { - t.Error(err) - return - } - artifact := sourcev1.Artifact{ - Path: filepath.Join(randStringRunes(10), randStringRunes(10), randStringRunes(10)), - } - if err := storage.MkdirAll(artifact); err != nil { - t.Fatalf("artifact directory creation failed: %v", err) - } - if err := storage.CopyFromPath(&artifact, absPath); err != nil { - t.Errorf("CopyFromPath() error = %v", err) - } - matchFile(t, storage, artifact, tt.want, tt.expectMismatch) - }) - } -} - -func TestStorage_getGarbageFiles(t *testing.T) { - artifactFolder := filepath.Join("foo", "bar") - tests := []struct { - name string - artifactPaths []string - createPause time.Duration - ttl time.Duration - maxItemsToBeRetained int - totalCountLimit int - wantDeleted []string - }{ - { - name: "delete files based on maxItemsToBeRetained", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - filepath.Join(artifactFolder, "artifact5.tar.gz"), - }, - createPause: time.Millisecond * 10, - ttl: time.Minute * 2, - totalCountLimit: 10, - maxItemsToBeRetained: 2, - wantDeleted: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - }, - }, - { - name: "delete files based on maxItemsToBeRetained, ignore lock files", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact1.tar.gz.lock"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz.lock"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz.lock"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - filepath.Join(artifactFolder, "artifact5.tar.gz"), - }, - createPause: time.Millisecond * 10, - ttl: time.Minute * 2, - totalCountLimit: 10, - maxItemsToBeRetained: 2, - wantDeleted: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - }, - }, - { - name: "delete files based on ttl", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - filepath.Join(artifactFolder, "artifact5.tar.gz"), - }, - createPause: time.Second * 1, - ttl: time.Second*3 + time.Millisecond*500, - totalCountLimit: 10, - maxItemsToBeRetained: 4, - wantDeleted: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - }, - }, - { - name: "delete files based on ttl, ignore lock files", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact1.tar.gz.lock"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz.lock"), - 
filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - filepath.Join(artifactFolder, "artifact5.tar.gz"), - }, - createPause: time.Second * 1, - ttl: time.Second*3 + time.Millisecond*500, - totalCountLimit: 10, - maxItemsToBeRetained: 4, - wantDeleted: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - }, - }, - { - name: "delete files based on ttl and maxItemsToBeRetained", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - filepath.Join(artifactFolder, "artifact5.tar.gz"), - filepath.Join(artifactFolder, "artifact6.tar.gz"), - }, - createPause: time.Second * 1, - ttl: time.Second*5 + time.Millisecond*500, - totalCountLimit: 10, - maxItemsToBeRetained: 4, - wantDeleted: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - }, - }, - { - name: "delete files based on ttl and maxItemsToBeRetained and totalCountLimit", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - filepath.Join(artifactFolder, "artifact5.tar.gz"), - filepath.Join(artifactFolder, "artifact6.tar.gz"), - }, - createPause: time.Millisecond * 500, - ttl: time.Millisecond * 500, - totalCountLimit: 3, - maxItemsToBeRetained: 2, - wantDeleted: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", tt.ttl, tt.maxItemsToBeRetained) - g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") - - artifact := sourcev1.Artifact{ - Path: tt.artifactPaths[len(tt.artifactPaths)-1], - } - g.Expect(os.MkdirAll(filepath.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred()) - for _, artifactPath := range tt.artifactPaths { - f, err := os.Create(filepath.Join(dir, artifactPath)) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(f.Close()).ToNot(HaveOccurred()) - time.Sleep(tt.createPause) - } - - deletedPaths, err := s.getGarbageFiles(artifact, tt.totalCountLimit, tt.maxItemsToBeRetained, tt.ttl) - g.Expect(err).ToNot(HaveOccurred(), "failed to collect garbage files") - g.Expect(len(tt.wantDeleted)).To(Equal(len(deletedPaths))) - for _, wantDeletedPath := range tt.wantDeleted { - present := false - for _, deletedPath := range deletedPaths { - if strings.Contains(deletedPath, wantDeletedPath) { - present = true - break - } - } - if !present { - g.Fail(fmt.Sprintf("expected file to be deleted, still exists: %s", wantDeletedPath)) - } - } - }) - } -} - -func TestStorage_GarbageCollect(t *testing.T) { - artifactFolder := filepath.Join("foo", "bar") - tests := []struct { - name string - artifactPaths []string - wantCollected []string - wantDeleted []string - wantErr string - ctxTimeout time.Duration - }{ - { - name: "garbage collects", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact1.tar.gz.lock"), - filepath.Join(artifactFolder, 
"artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz.lock"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - }, - wantCollected: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - }, - wantDeleted: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact1.tar.gz.lock"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz.lock"), - }, - ctxTimeout: time.Second * 1, - }, - { - name: "garbage collection fails with context timeout", - artifactPaths: []string{ - filepath.Join(artifactFolder, "artifact1.tar.gz"), - filepath.Join(artifactFolder, "artifact2.tar.gz"), - filepath.Join(artifactFolder, "artifact3.tar.gz"), - filepath.Join(artifactFolder, "artifact4.tar.gz"), - }, - wantErr: "context deadline exceeded", - ctxTimeout: time.Nanosecond * 1, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - dir := t.TempDir() - - s, err := NewStorage(dir, "hostname", time.Second*2, 2) - g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") - - artifact := sourcev1.Artifact{ - Path: tt.artifactPaths[len(tt.artifactPaths)-1], - } - g.Expect(os.MkdirAll(filepath.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred()) - for i, artifactPath := range tt.artifactPaths { - f, err := os.Create(filepath.Join(dir, artifactPath)) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(f.Close()).ToNot(HaveOccurred()) - if i != len(tt.artifactPaths)-1 { - time.Sleep(time.Second * 1) - } - } - - collectedPaths, err := s.GarbageCollect(context.TODO(), artifact, tt.ctxTimeout) - if tt.wantErr == "" { - g.Expect(err).ToNot(HaveOccurred(), "failed to collect garbage files") - } else { - g.Expect(err).To(HaveOccurred()) - g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) - } - if len(tt.wantCollected) > 0 { - g.Expect(len(tt.wantCollected)).To(Equal(len(collectedPaths))) - for _, wantCollectedPath := range tt.wantCollected { - present := false - for _, collectedPath := range collectedPaths { - if strings.Contains(collectedPath, wantCollectedPath) { - g.Expect(collectedPath).ToNot(BeAnExistingFile()) - present = true - break - } - } - if present == false { - g.Fail(fmt.Sprintf("expected file to be garbage collected, still exists: %s", wantCollectedPath)) - } - } - } - for _, delFile := range tt.wantDeleted { - g.Expect(filepath.Join(dir, delFile)).ToNot(BeAnExistingFile()) - } - }) - } -} - -func TestStorage_VerifyArtifact(t *testing.T) { - g := NewWithT(t) - - dir := t.TempDir() - s, err := NewStorage(dir, "", 0, 0) - g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") - - g.Expect(os.WriteFile(filepath.Join(dir, "artifact"), []byte("test"), 0o600)).To(Succeed()) - - t.Run("artifact without digest", func(t *testing.T) { - g := NewWithT(t) - - err := s.VerifyArtifact(sourcev1.Artifact{}) - g.Expect(err).To(HaveOccurred()) - g.Expect(err).To(MatchError("artifact has no digest")) - }) - - t.Run("artifact with invalid digest", func(t *testing.T) { - g := NewWithT(t) - - err := s.VerifyArtifact(sourcev1.Artifact{Digest: "invalid"}) - g.Expect(err).To(HaveOccurred()) - g.Expect(err).To(MatchError("failed to parse artifact digest 'invalid': invalid checksum digest format")) - }) - - t.Run("artifact with invalid path", func(t *testing.T) { - g := NewWithT(t) - - err := s.VerifyArtifact(sourcev1.Artifact{ - Digest: 
"sha256:9ba7a35ce8acd3557fe30680ef193ca7a36bb5dc62788f30de7122a0a5beab69", - Path: "invalid", - }) - g.Expect(err).To(HaveOccurred()) - g.Expect(errors.Is(err, os.ErrNotExist)).To(BeTrue()) - }) - - t.Run("artifact with digest mismatch", func(t *testing.T) { - g := NewWithT(t) - - err := s.VerifyArtifact(sourcev1.Artifact{ - Digest: "sha256:9ba7a35ce8acd3557fe30680ef193ca7a36bb5dc62788f30de7122a0a5beab69", - Path: "artifact", - }) - g.Expect(err).To(HaveOccurred()) - g.Expect(err).To(MatchError("computed digest doesn't match 'sha256:9ba7a35ce8acd3557fe30680ef193ca7a36bb5dc62788f30de7122a0a5beab69'")) - }) - - t.Run("artifact with digest match", func(t *testing.T) { - g := NewWithT(t) - - err := s.VerifyArtifact(sourcev1.Artifact{ - Digest: "sha256:9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", - Path: "artifact", - }) - g.Expect(err).ToNot(HaveOccurred()) - }) -} diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 89a51bea8..ad0365616 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -32,6 +32,10 @@ import ( "testing" "time" + "github.com/distribution/distribution/v3/configuration" + dockerRegistry "github.com/distribution/distribution/v3/registry" + _ "github.com/distribution/distribution/v3/registry/auth/htpasswd" + _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" "github.com/foxcpp/go-mockdns" "github.com/phayes/freeport" "github.com/sirupsen/logrus" @@ -43,19 +47,17 @@ import ( "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" - "github.com/distribution/distribution/v3/configuration" - dockerRegistry "github.com/distribution/distribution/v3/registry" - _ "github.com/distribution/distribution/v3/registry/auth/htpasswd" - _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory" - + "github.com/fluxcd/pkg/artifact/config" + "github.com/fluxcd/pkg/artifact/digest" + "github.com/fluxcd/pkg/artifact/storage" "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/metrics" "github.com/fluxcd/pkg/runtime/testenv" "github.com/fluxcd/pkg/testserver" sourcev1 "github.com/fluxcd/source-controller/api/v1" - sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/internal/cache" // +kubebuilder:scaffold:imports ) @@ -82,7 +84,7 @@ const ( var ( k8sClient client.Client testEnv *testenv.Environment - testStorage *Storage + testStorage *storage.Storage testServer *testserver.ArtifactServer testMetricsH controller.Metrics ctx = ctrl.SetupSignalHandler() @@ -273,7 +275,6 @@ func TestMain(m *testing.M) { initTestTLS() utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme)) - utilruntime.Must(sourcev1beta2.AddToScheme(scheme.Scheme)) testEnv = testenv.New( testenv.WithCRDPath(filepath.Join("..", "..", "config", "crd", "bases")), @@ -431,12 +432,20 @@ func initTestTLS() { } } -func newTestStorage(s *testserver.HTTPServer) (*Storage, error) { - storage, err := NewStorage(s.Root(), s.URL(), retentionTTL, retentionRecords) +func newTestStorage(s *testserver.HTTPServer) (*storage.Storage, error) { + opts := &config.Options{ + 
StoragePath: s.Root(), + StorageAddress: s.URL(), + StorageAdvAddress: s.URL(), + ArtifactRetentionTTL: retentionTTL, + ArtifactRetentionRecords: retentionRecords, + ArtifactDigestAlgo: digest.Canonical.String(), + } + st, err := storage.New(opts) if err != nil { return nil, err } - return storage, nil + return st, nil } var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") @@ -452,3 +461,8 @@ func randStringRunes(n int) string { func int64p(i int64) *int64 { return &i } + +func logOCIRepoStatus(t *testing.T, obj *sourcev1.OCIRepository) { + sts, _ := yaml.Marshal(obj.Status) + t.Log(string(sts)) +} diff --git a/internal/controller/testdata/certs/ca-key.pem b/internal/controller/testdata/certs/ca-key.pem index b69de5ab5..5f78af275 100644 --- a/internal/controller/testdata/certs/ca-key.pem +++ b/internal/controller/testdata/certs/ca-key.pem @@ -1,5 +1,5 @@ -----BEGIN EC PRIVATE KEY----- -MHcCAQEEIOH/u9dMcpVcZ0+X9Fc78dCTj8SHuXawhLjhu/ej64WToAoGCCqGSM49 -AwEHoUQDQgAEruH/kPxtX3cyYR2G7TYmxLq6AHyzo/NGXc9XjGzdJutE2SQzn37H -dvSJbH+Lvqo9ik0uiJVRVdCYD1j7gNszGA== +MHcCAQEEICJFvVFVBSL0EteniBRfI9M1tm9Vmh9CKv7dhvZSqtV6oAoGCCqGSM49 +AwEHoUQDQgAE+EGQ9wZw/XIbyCwu7wvbzoGhpE2KtZwSUXboPEAgacfaqfgdT92D +If9qYie8umbgUymQnnqN8fRnT/wqqdBLDg== -----END EC PRIVATE KEY----- diff --git a/internal/controller/testdata/certs/ca.csr b/internal/controller/testdata/certs/ca.csr index baa8aeb26..ed5490ce2 100644 --- a/internal/controller/testdata/certs/ca.csr +++ b/internal/controller/testdata/certs/ca.csr @@ -1,9 +1,9 @@ -----BEGIN CERTIFICATE REQUEST----- -MIIBIDCBxgIBADAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 -AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr -RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxigSzBJBgkqhkiG9w0BCQ4x +MIIBHzCBxgIBADAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 +AgEGCCqGSM49AwEHA0IABPhBkPcGcP1yG8gsLu8L286BoaRNirWcElF26DxAIGnH +2qn4HU/dgyH/amInvLpm4FMpkJ56jfH0Z0/8KqnQSw6gSzBJBgkqhkiG9w0BCQ4x PDA6MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFt -cGxlLmNvbYcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAkw85nyLhJssyCYsaFvRU -EErhu66xHPJug/nG50uV5OoCIQCUorrflOSxfChPeCe4xfwcPv7FpcCYbKVYtGzz -b34Wow== +cGxlLmNvbYcEfwAAATAKBggqhkjOPQQDAgNIADBFAiEA1PxOWSIrmLb5IeejHvfx +AkjpamR/GTLhSzXlGv1hCmsCIDSeZL2OF5R5k2v4giXiB6GUfmawykGkO2fIG1kq +5l5V -----END CERTIFICATE REQUEST----- diff --git a/internal/controller/testdata/certs/ca.pem b/internal/controller/testdata/certs/ca.pem index 080bd24e6..72644519d 100644 --- a/internal/controller/testdata/certs/ca.pem +++ b/internal/controller/testdata/certs/ca.pem @@ -1,11 +1,11 @@ -----BEGIN CERTIFICATE----- -MIIBhzCCAS2gAwIBAgIUdsAtiX3gN0uk7ddxASWYE/tdv0wwCgYIKoZIzj0EAwIw -GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjAwNDE3MDgxODAwWhcNMjUw -NDE2MDgxODAwWjAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 -AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr -RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxijUzBRMA4GA1UdDwEB/wQE -AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQGyUiU1QEZiMAqjsnIYTwZ -4yp5wzAPBgNVHREECDAGhwR/AAABMAoGCCqGSM49BAMCA0gAMEUCIQDzdtvKdE8O -1+WRTZ9MuSiFYcrEz7Zne7VXouDEKqKEigIgM4WlbDeuNCKbqhqj+xZV0pa3rweb -OD8EjjCMY69RMO0= +MIIBiDCCAS2gAwIBAgIUCRPU/Fa1nIWlk7TUejHGI+WKJFAwCgYIKoZIzj0EAwIw +GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjUwNDIyMDcwNTAwWhcNMzAw +NDIxMDcwNTAwWjAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49 +AgEGCCqGSM49AwEHA0IABPhBkPcGcP1yG8gsLu8L286BoaRNirWcElF26DxAIGnH +2qn4HU/dgyH/amInvLpm4FMpkJ56jfH0Z0/8KqnQSw6jUzBRMA4GA1UdDwEB/wQE +AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS+cS2gBCfSCltLUMNY0kG2 
+mj9zEDAPBgNVHREECDAGhwR/AAABMAoGCCqGSM49BAMCA0kAMEYCIQC33kO/m+ab +i/2dlkg7hab4jCkFkxV3fWiOP0lbrLIMYQIhAPOcHeXmGE32apXKoZ6IfGJdMtz1 +3bkHYeqNs2qtpQ/5 -----END CERTIFICATE----- diff --git a/internal/controller/testdata/certs/client-key.pem b/internal/controller/testdata/certs/client-key.pem index b39c483d0..f55b40b4d 100644 --- a/internal/controller/testdata/certs/client-key.pem +++ b/internal/controller/testdata/certs/client-key.pem @@ -1,5 +1,5 @@ -----BEGIN EC PRIVATE KEY----- -MHcCAQEEICpqb1p1TH98yoFXEEt6JmWc/Snb8NaYyz8jfTOVDBLOoAoGCCqGSM49 -AwEHoUQDQgAERjzob4CCuyv+cYPyTYCPHwGuqSNGNuX3UGWpxvzwEqjYEWiePlOz -eJLk4DWaVX8CmVakNLsK/EHnBv9ErG7QYQ== +MHcCAQEEIFVLYwGEhqLW/WYnsA9om6cSxcgVsKnwIWXc34DF7LpwoAoGCCqGSM49 +AwEHoUQDQgAE5H76We32W5cQq8DRJT+pteyh53GUBiI5IbM+qVWgsCIFJEaSJKgs +mv1H7c3NhP292Pgr6vdWJACLQHzmpsVpmg== -----END EC PRIVATE KEY----- diff --git a/internal/controller/testdata/certs/client.csr b/internal/controller/testdata/certs/client.csr index 41f498804..3699ea27b 100644 --- a/internal/controller/testdata/certs/client.csr +++ b/internal/controller/testdata/certs/client.csr @@ -1,8 +1,8 @@ -----BEGIN CERTIFICATE REQUEST----- -MIIBHDCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG -CCqGSM49AwEHA0IABEY86G+Agrsr/nGD8k2Ajx8BrqkjRjbl91Blqcb88BKo2BFo -nj5Ts3iS5OA1mlV/AplWpDS7CvxB5wb/RKxu0GGgSzBJBgkqhkiG9w0BCQ4xPDA6 +MIIBGzCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABOR++lnt9luXEKvA0SU/qbXsoedxlAYiOSGzPqlVoLAiBSRG +kiSoLJr9R+3NzYT9vdj4K+r3ViQAi0B85qbFaZqgSzBJBgkqhkiG9w0BCQ4xPDA6 MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxl -LmNvbYcEfwAAATAKBggqhkjOPQQDAgNIADBFAiAHmtr9fDDx5eyFfY7r5m8xA4Wh -Jm+TB6/czvXRNNOKzAIhAN7ln6BpneEm2oqIBGqvfc3pETC6jdGJxCfYw+X+7von +LmNvbYcEfwAAATAKBggqhkjOPQQDAgNHADBEAiB0px2gw2ICFz26zAajtJyoNHl+ +inOXY5ohtzP4ag+NXQIgAbjIsOUuQ7JT31DdI6yCVfO014hHawtEsdV4rxTrQMA= -----END CERTIFICATE REQUEST----- diff --git a/internal/controller/testdata/certs/client.pem b/internal/controller/testdata/certs/client.pem index 4a85663ea..9db876e59 100644 --- a/internal/controller/testdata/certs/client.pem +++ b/internal/controller/testdata/certs/client.pem @@ -1,13 +1,13 @@ -----BEGIN CERTIFICATE----- -MIIB7DCCAZKgAwIBAgIUPJmKtZ6CfSxybx2BSsVS5EVun0swCgYIKoZIzj0EAwIw -GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjMwNzE5MTExMzAwWhcNMzMw -NzE2MTExMzAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG -CCqGSM49AwEHA0IABEY86G+Agrsr/nGD8k2Ajx8BrqkjRjbl91Blqcb88BKo2BFo -nj5Ts3iS5OA1mlV/AplWpDS7CvxB5wb/RKxu0GGjgbowgbcwDgYDVR0PAQH/BAQD +MIIB7DCCAZKgAwIBAgIUPH5zyEsXoFMCMkZaM2s6YtnoQcgwCgYIKoZIzj0EAwIw +GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjUwNDIyMDcwNTAwWhcNMzUw +NDIwMDcwNTAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABOR++lnt9luXEKvA0SU/qbXsoedxlAYiOSGzPqlVoLAiBSRG +kiSoLJr9R+3NzYT9vdj4K+r3ViQAi0B85qbFaZqjgbowgbcwDgYDVR0PAQH/BAQD AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA -MB0GA1UdDgQWBBTgAyCQoH/EJqz/nY5DJa/uvWWshzAfBgNVHSMEGDAWgBQGyUiU -1QEZiMAqjsnIYTwZ4yp5wzA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu -Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDSAAwRQIgKSJH -YvhKiXcUUzRoL6FsXQeAlhemSg3lD9se+BhRF8ECIQDx2UpWFLDe5NOPqhrcR1Sd -haFriAG8eR1yD3u3nJvY6g== +MB0GA1UdDgQWBBTqud4vpysQdb1/5K3RoDXvBdQGgzAfBgNVHSMEGDAWgBS+cS2g +BCfSCltLUMNY0kG2mj9zEDA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu +Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDSAAwRQIhAM0u +Eo6u3BDtw5bezhLa/THDy4QT63ktpAff9i/QJOErAiAifOvW7n5ZTLjjSnJ+dCtr +Avtupcg1WLyryhliqtNKhg== -----END CERTIFICATE----- diff --git 
a/internal/controller/testdata/certs/server-key.pem b/internal/controller/testdata/certs/server-key.pem index 5054ff39f..64d7da136 100644 --- a/internal/controller/testdata/certs/server-key.pem +++ b/internal/controller/testdata/certs/server-key.pem @@ -1,5 +1,5 @@ -----BEGIN EC PRIVATE KEY----- -MHcCAQEEIKQbEXV6nljOHMmPrWVWQ+JrAE5wsbE9iMhfY7wlJgXOoAoGCCqGSM49 -AwEHoUQDQgAE+53oBGlrvVUTelSGYji8GNHVhVg8jOs1PeeLuXCIZjQmctHLFEq3 -fE+mGxCL93MtpYzlwIWBf0m7pEGQre6bzg== +MHcCAQEEIH19RQir/x9wHNAvHITu7/3Y4ckQ3GsNyEGYF3/nalheoAoGCCqGSM49 +AwEHoUQDQgAEvqlooNIpRmCjv9yBzjqoyXZvcU8zo9npYm3HPX7TReYetrkkJh/P +6a5NDJhnWemcj9iZdm2kGTE7MCgGi4mRog== -----END EC PRIVATE KEY----- diff --git a/internal/controller/testdata/certs/server.csr b/internal/controller/testdata/certs/server.csr index 5caf7b39c..b0fce1781 100644 --- a/internal/controller/testdata/certs/server.csr +++ b/internal/controller/testdata/certs/server.csr @@ -1,8 +1,8 @@ -----BEGIN CERTIFICATE REQUEST----- -MIIBHDCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG -CCqGSM49AwEHA0IABPud6ARpa71VE3pUhmI4vBjR1YVYPIzrNT3ni7lwiGY0JnLR -yxRKt3xPphsQi/dzLaWM5cCFgX9Ju6RBkK3um86gSzBJBgkqhkiG9w0BCQ4xPDA6 +MIIBGzCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABL6paKDSKUZgo7/cgc46qMl2b3FPM6PZ6WJtxz1+00XmHra5 +JCYfz+muTQyYZ1npnI/YmXZtpBkxOzAoBouJkaKgSzBJBgkqhkiG9w0BCQ4xPDA6 MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxl -LmNvbYcEfwAAATAKBggqhkjOPQQDAgNIADBFAiB5A6wvQ5x6g/zhiyn+wLzXsOaB -Gb/F25p/zTHHQqZbkwIhAPUgWzy/2bs6eZEi97bSlaRdmrqHwqT842t5sEwGyXNV +LmNvbYcEfwAAATAKBggqhkjOPQQDAgNHADBEAiAJbvDLjrCkTRvTjrv2wXLN9Hgu +p6SrTQJUWlIj3S8DggIgJraxPvnwfeKE5dM7ZgJXADHy838h04dQ+Za7hS899V8= -----END CERTIFICATE REQUEST----- diff --git a/internal/controller/testdata/certs/server.pem b/internal/controller/testdata/certs/server.pem index 11c655a0b..f3345e3b2 100644 --- a/internal/controller/testdata/certs/server.pem +++ b/internal/controller/testdata/certs/server.pem @@ -1,13 +1,13 @@ -----BEGIN CERTIFICATE----- -MIIB7TCCAZKgAwIBAgIUB+17B8PU05wVTzRHLeG+S+ybZK4wCgYIKoZIzj0EAwIw -GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjAwNDE3MDgxODAwWhcNMzAw -NDE1MDgxODAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG -CCqGSM49AwEHA0IABPud6ARpa71VE3pUhmI4vBjR1YVYPIzrNT3ni7lwiGY0JnLR -yxRKt3xPphsQi/dzLaWM5cCFgX9Ju6RBkK3um86jgbowgbcwDgYDVR0PAQH/BAQD +MIIB6zCCAZKgAwIBAgIUSGuttQSdoyWQzeZ6GkiKORYYUvQwCgYIKoZIzj0EAwIw +GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjUwNDIyMDcwNTAwWhcNMzUw +NDIwMDcwNTAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABL6paKDSKUZgo7/cgc46qMl2b3FPM6PZ6WJtxz1+00XmHra5 +JCYfz+muTQyYZ1npnI/YmXZtpBkxOzAoBouJkaKjgbowgbcwDgYDVR0PAQH/BAQD AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA -MB0GA1UdDgQWBBTM8HS5EIlVMBYv/300jN8PEArUgDAfBgNVHSMEGDAWgBQGyUiU -1QEZiMAqjsnIYTwZ4yp5wzA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu -Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhAOgB -5W82FEgiTTOmsNRekkK5jUPbj4D4eHtb2/BI7ph4AiEA2AxHASIFBdv5b7Qf5prb -bdNmUCzAvVuCAKuMjg2OPrE= +MB0GA1UdDgQWBBSNrNAk9jWUcFjxjAKzuDwsBrG1NDAfBgNVHSMEGDAWgBS+cS2g +BCfSCltLUMNY0kG2mj9zEDA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu +Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDRwAwRAIgIcrb +xGgcRsmP/R6Qo+Xe/w1UvNDaWJfsWO+hq1DtOQgCIEyGi3ClowsGnNpo734ArWbG +taem7qVKZJmCWRM6DFuT -----END CERTIFICATE----- diff --git a/internal/digest/digest.go b/internal/digest/digest.go deleted file mode 100644 index 6b1117398..000000000 --- a/internal/digest/digest.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2022 The Flux 
authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package digest - -import ( - "crypto" - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "fmt" - - "github.com/opencontainers/go-digest" - _ "github.com/opencontainers/go-digest/blake3" -) - -const ( - SHA1 digest.Algorithm = "sha1" -) - -var ( - // Canonical is the primary digest algorithm used to calculate checksums. - Canonical = digest.SHA256 -) - -func init() { - // Register SHA-1 algorithm for support of e.g. Git commit SHAs. - digest.RegisterAlgorithm(SHA1, crypto.SHA1) -} - -// AlgorithmForName returns the digest algorithm for the given name, or an -// error of type digest.ErrDigestUnsupported if the algorithm is unavailable. -func AlgorithmForName(name string) (digest.Algorithm, error) { - a := digest.Algorithm(name) - if !a.Available() { - return "", fmt.Errorf("%w: %s", digest.ErrDigestUnsupported, name) - } - return a, nil -} diff --git a/internal/digest/digest_test.go b/internal/digest/digest_test.go deleted file mode 100644 index 3030c2d11..000000000 --- a/internal/digest/digest_test.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package digest - -import ( - "errors" - "testing" - - . "github.com/onsi/gomega" - "github.com/opencontainers/go-digest" -) - -func TestAlgorithmForName(t *testing.T) { - tests := []struct { - name string - want digest.Algorithm - wantErr error - }{ - { - name: "sha256", - want: digest.SHA256, - }, - { - name: "sha384", - want: digest.SHA384, - }, - { - name: "sha512", - want: digest.SHA512, - }, - { - name: "blake3", - want: digest.BLAKE3, - }, - { - name: "sha1", - want: SHA1, - }, - { - name: "not-available", - wantErr: digest.ErrDigestUnsupported, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - got, err := AlgorithmForName(tt.name) - if tt.wantErr != nil { - g.Expect(err).To(HaveOccurred()) - g.Expect(errors.Is(err, tt.wantErr)).To(BeTrue()) - return - } - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(got).To(Equal(tt.want)) - }) - } -} diff --git a/internal/digest/writer.go b/internal/digest/writer.go deleted file mode 100644 index 4783f8b84..000000000 --- a/internal/digest/writer.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package digest - -import ( - "fmt" - "io" - - "github.com/opencontainers/go-digest" -) - -// MultiDigester is a digester that writes to multiple digesters to calculate -// the checksum of different algorithms. -type MultiDigester struct { - d map[digest.Algorithm]digest.Digester -} - -// NewMultiDigester returns a new MultiDigester that writes to newly -// initialized digesters for the given algorithms. If a provided algorithm is -// not available, it returns a digest.ErrDigestUnsupported error. -func NewMultiDigester(algos ...digest.Algorithm) (*MultiDigester, error) { - d := make(map[digest.Algorithm]digest.Digester, len(algos)) - for _, a := range algos { - if _, ok := d[a]; ok { - continue - } - if !a.Available() { - return nil, fmt.Errorf("%w: %s", digest.ErrDigestUnsupported, a) - } - d[a] = a.Digester() - } - return &MultiDigester{d: d}, nil -} - -// Write writes p to all underlying digesters. -func (w *MultiDigester) Write(p []byte) (n int, err error) { - for _, d := range w.d { - n, err = d.Hash().Write(p) - if err != nil { - return - } - if n != len(p) { - err = io.ErrShortWrite - return - } - } - return len(p), nil -} - -// Digest returns the digest of the data written to the digester of the given -// algorithm, or an empty digest if the algorithm is not available. -func (w *MultiDigester) Digest(algo digest.Algorithm) digest.Digest { - if d, ok := w.d[algo]; ok { - return d.Digest() - } - return "" -} diff --git a/internal/digest/writer_test.go b/internal/digest/writer_test.go deleted file mode 100644 index 9ae63b882..000000000 --- a/internal/digest/writer_test.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package digest - -import ( - "crypto/rand" - "testing" - - . 
"github.com/onsi/gomega" - "github.com/opencontainers/go-digest" -) - -func TestNewMultiDigester(t *testing.T) { - t.Run("constructs a MultiDigester", func(t *testing.T) { - g := NewWithT(t) - - d, err := NewMultiDigester(Canonical, digest.SHA512) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(d.d).To(HaveLen(2)) - }) - - t.Run("returns an error if an algorithm is not available", func(t *testing.T) { - g := NewWithT(t) - - _, err := NewMultiDigester(digest.Algorithm("not-available")) - g.Expect(err).To(HaveOccurred()) - }) -} - -func TestMultiDigester_Write(t *testing.T) { - t.Run("writes to all digesters", func(t *testing.T) { - g := NewWithT(t) - - d, err := NewMultiDigester(Canonical, digest.SHA512) - g.Expect(err).ToNot(HaveOccurred()) - - n, err := d.Write([]byte("hello")) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(n).To(Equal(5)) - - n, err = d.Write([]byte(" world")) - g.Expect(err).ToNot(HaveOccurred()) - g.Expect(n).To(Equal(6)) - - g.Expect(d.Digest(Canonical)).To(BeEquivalentTo("sha256:b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9")) - g.Expect(d.Digest(digest.SHA512)).To(BeEquivalentTo("sha512:309ecc489c12d6eb4cc40f50c902f2b4d0ed77ee511a7c7a9bcd3ca86d4cd86f989dd35bc5ff499670da34255b45b0cfd830e81f605dcf7dc5542e93ae9cd76f")) - }) -} - -func TestMultiDigester_Digest(t *testing.T) { - t.Run("returns the digest for the given algorithm", func(t *testing.T) { - g := NewWithT(t) - - d, err := NewMultiDigester(Canonical, digest.SHA512) - g.Expect(err).ToNot(HaveOccurred()) - - g.Expect(d.Digest(Canonical)).To(BeEquivalentTo("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")) - g.Expect(d.Digest(digest.SHA512)).To(BeEquivalentTo("sha512:cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e")) - }) - - t.Run("returns an empty digest if the algorithm is not supported", func(t *testing.T) { - g := NewWithT(t) - - d, err := NewMultiDigester(Canonical, digest.SHA512) - g.Expect(err).ToNot(HaveOccurred()) - - g.Expect(d.Digest(digest.Algorithm("not-available"))).To(BeEmpty()) - }) -} - -func benchmarkMultiDigesterWrite(b *testing.B, algos []digest.Algorithm, pSize int64) { - md, err := NewMultiDigester(algos...) 
- if err != nil { - b.Fatal(err) - } - - p := make([]byte, pSize) - if _, err = rand.Read(p); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - md.Write(p) - } -} - -func BenchmarkMultiDigester_Write(b *testing.B) { - const pSize = 1024 * 2 - - b.Run("sha1", func(b *testing.B) { - benchmarkMultiDigesterWrite(b, []digest.Algorithm{SHA1}, pSize) - }) - - b.Run("sha256", func(b *testing.B) { - benchmarkMultiDigesterWrite(b, []digest.Algorithm{digest.SHA256}, pSize) - }) - - b.Run("blake3", func(b *testing.B) { - benchmarkMultiDigesterWrite(b, []digest.Algorithm{digest.BLAKE3}, pSize) - }) - - b.Run("sha256+sha384", func(b *testing.B) { - benchmarkMultiDigesterWrite(b, []digest.Algorithm{digest.SHA256, digest.SHA384}, pSize) - }) - - b.Run("sha256+sha512", func(b *testing.B) { - benchmarkMultiDigesterWrite(b, []digest.Algorithm{digest.SHA256, digest.SHA512}, pSize) - }) - - b.Run("sha256+blake3", func(b *testing.B) { - benchmarkMultiDigesterWrite(b, []digest.Algorithm{digest.SHA256, digest.BLAKE3}, pSize) - }) -} diff --git a/internal/features/features.go b/internal/features/features.go index c2622ce32..edb9beb17 100644 --- a/internal/features/features.go +++ b/internal/features/features.go @@ -19,7 +19,10 @@ limitations under the License. // states. package features -import feathelper "github.com/fluxcd/pkg/runtime/features" +import ( + "github.com/fluxcd/pkg/auth" + feathelper "github.com/fluxcd/pkg/runtime/features" +) const ( // CacheSecretsAndConfigMaps controls whether secrets and configmaps should be cached. @@ -35,6 +38,10 @@ var features = map[string]bool{ CacheSecretsAndConfigMaps: false, } +func init() { + auth.SetFeatureGates(features) +} + // FeatureGates contains a list of all supported feature gates and // their default values. func FeatureGates() map[string]bool { diff --git a/internal/fs/LICENSE b/internal/fs/LICENSE deleted file mode 100644 index a2dd15faf..000000000 --- a/internal/fs/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
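
The internal digest and fs helpers being deleted here, together with the controller-local Storage removed earlier in this diff, are superseded by the github.com/fluxcd/pkg/artifact modules that the suite_test.go hunk above already imports (config, digest, storage). The following is a minimal standalone sketch of that migration: the config.Options field names, digest.Canonical, storage.New, and the *storage.Storage return type are taken from the hunk itself, while the concrete values and the assumption that the TTL field is a time.Duration and the records field an int (matching the arguments of the deleted NewStorage) are illustrative, not confirmed by this change.

package main

import (
	"log"
	"time"

	"github.com/fluxcd/pkg/artifact/config"
	"github.com/fluxcd/pkg/artifact/digest"
	"github.com/fluxcd/pkg/artifact/storage"
)

func main() {
	// Mirrors the newTestStorage change in suite_test.go: the field names come
	// from that hunk, the values below are placeholders for illustration only.
	opts := &config.Options{
		StoragePath:              "/data",  // local directory backing the artifact storage
		StorageAddress:           ":9090",  // bind address (the test passes s.URL() for both addresses)
		StorageAdvAddress:        "source-controller.example.svc.cluster.local.",
		ArtifactRetentionTTL:     60 * time.Second, // assumed time.Duration, like the old NewStorage TTL argument
		ArtifactRetentionRecords: 2,                // assumed int, like the old NewStorage records argument
		ArtifactDigestAlgo:       digest.Canonical.String(),
	}

	// storage.New replaces the controller-local NewStorage constructor
	// that this diff deletes.
	st, err := storage.New(opts)
	if err != nil {
		log.Fatalf("failed to initialize artifact storage: %v", err)
	}
	_ = st // the *storage.Storage is then handed to the reconcilers, as testStorage is in the test suite
}

Compared to the deleted NewStorage(dir, hostname, ttl, records) signature, all settings now travel through a single options struct, which also makes the digest algorithm configurable via ArtifactDigestAlgo instead of being fixed to the internal Canonical constant removed above.
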
diff --git a/internal/fs/fs.go b/internal/fs/fs.go deleted file mode 100644 index 21cf96e69..000000000 --- a/internal/fs/fs.go +++ /dev/null @@ -1,345 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package fs - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "syscall" -) - -// RenameWithFallback attempts to rename a file or directory, but falls back to -// copying in the event of a cross-device link error. If the fallback copy -// succeeds, src is still removed, emulating normal rename behavior. -func RenameWithFallback(src, dst string) error { - _, err := os.Stat(src) - if err != nil { - return fmt.Errorf("cannot stat %s: %w", src, err) - } - - err = os.Rename(src, dst) - if err == nil { - return nil - } - - return renameFallback(err, src, dst) -} - -// renameByCopy attempts to rename a file or directory by copying it to the -// destination and then removing the src thus emulating the rename behavior. -func renameByCopy(src, dst string) error { - var cerr error - if dir, _ := IsDir(src); dir { - cerr = CopyDir(src, dst) - if cerr != nil { - cerr = fmt.Errorf("copying directory failed: %w", cerr) - } - } else { - cerr = copyFile(src, dst) - if cerr != nil { - cerr = fmt.Errorf("copying file failed: %w", cerr) - } - } - - if cerr != nil { - return fmt.Errorf("rename fallback failed: cannot rename %s to %s: %w", src, dst, cerr) - } - - if err := os.RemoveAll(src); err != nil { - return fmt.Errorf("cannot delete %s: %w", src, err) - } - - return nil -} - -var ( - errSrcNotDir = errors.New("source is not a directory") - errDstExist = errors.New("destination already exists") -) - -// CopyDir recursively copies a directory tree, attempting to preserve permissions. -// Source directory must exist, destination directory must *not* exist. -func CopyDir(src, dst string) error { - src = filepath.Clean(src) - dst = filepath.Clean(dst) - - // We use os.Lstat() here to ensure we don't fall in a loop where a symlink - // actually links to a one of its parent directories. - fi, err := os.Lstat(src) - if err != nil { - return err - } - if !fi.IsDir() { - return errSrcNotDir - } - - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - return errDstExist - } - - if err = os.MkdirAll(dst, fi.Mode()); err != nil { - return fmt.Errorf("cannot mkdir %s: %w", dst, err) - } - - entries, err := os.ReadDir(src) - if err != nil { - return fmt.Errorf("cannot read directory %s: %w", dst, err) - } - - for _, entry := range entries { - srcPath := filepath.Join(src, entry.Name()) - dstPath := filepath.Join(dst, entry.Name()) - - if entry.IsDir() { - if err = CopyDir(srcPath, dstPath); err != nil { - return fmt.Errorf("copying directory failed: %w", err) - } - } else { - // This will include symlinks, which is what we want when - // copying things. - if err = copyFile(srcPath, dstPath); err != nil { - return fmt.Errorf("copying file failed: %w", err) - } - } - } - - return nil -} - -// copyFile copies the contents of the file named src to the file named -// by dst. The file will be created if it does not already exist. If the -// destination file exists, all its contents will be replaced by the contents -// of the source file. The file mode will be copied from the source. 
-func copyFile(src, dst string) (err error) { - if sym, err := IsSymlink(src); err != nil { - return fmt.Errorf("symlink check failed: %w", err) - } else if sym { - if err := cloneSymlink(src, dst); err != nil { - if runtime.GOOS == "windows" { - // If cloning the symlink fails on Windows because the user - // does not have the required privileges, ignore the error and - // fall back to copying the file contents. - // - // ERROR_PRIVILEGE_NOT_HELD is 1314 (0x522): - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms681385(v=vs.85).aspx - if lerr, ok := err.(*os.LinkError); ok && lerr.Err != syscall.Errno(1314) { - return err - } - } else { - return err - } - } else { - return nil - } - } - - in, err := os.Open(src) - if err != nil { - return - } - defer in.Close() - - out, err := os.Create(dst) - if err != nil { - return - } - - if _, err = io.Copy(out, in); err != nil { - out.Close() - return - } - - // Check for write errors on Close - if err = out.Close(); err != nil { - return - } - - si, err := os.Stat(src) - if err != nil { - return - } - - // Temporary fix for Go < 1.9 - // - // See: https://github.com/golang/dep/issues/774 - // and https://github.com/golang/go/issues/20829 - if runtime.GOOS == "windows" { - dst = fixLongPath(dst) - } - err = os.Chmod(dst, si.Mode()) - - return -} - -// cloneSymlink will create a new symlink that points to the resolved path of sl. -// If sl is a relative symlink, dst will also be a relative symlink. -func cloneSymlink(sl, dst string) error { - resolved, err := os.Readlink(sl) - if err != nil { - return err - } - - return os.Symlink(resolved, dst) -} - -// IsDir determines is the path given is a directory or not. -func IsDir(name string) (bool, error) { - fi, err := os.Stat(name) - if err != nil { - return false, err - } - if !fi.IsDir() { - return false, fmt.Errorf("%q is not a directory", name) - } - return true, nil -} - -// IsSymlink determines if the given path is a symbolic link. -func IsSymlink(path string) (bool, error) { - l, err := os.Lstat(path) - if err != nil { - return false, err - } - - return l.Mode()&os.ModeSymlink == os.ModeSymlink, nil -} - -// fixLongPath returns the extended-length (\\?\-prefixed) form of -// path when needed, in order to avoid the default 260 character file -// path limit imposed by Windows. If path is not easily converted to -// the extended-length form (for example, if path is a relative path -// or contains .. elements), or is short enough, fixLongPath returns -// path unmodified. -// -// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath -func fixLongPath(path string) string { - // Do nothing (and don't allocate) if the path is "short". - // Empirically (at least on the Windows Server 2013 builder), - // the kernel is arbitrarily okay with < 248 bytes. That - // matches what the docs above say: - // "When using an API to create a directory, the specified - // path cannot be so long that you cannot append an 8.3 file - // name (that is, the directory name cannot exceed MAX_PATH - // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248. - // - // The MSDN docs appear to say that a normal path that is 248 bytes long - // will work; empirically the path must be less then 248 bytes long. - if len(path) < 248 { - // Don't fix. 
(This is how Go 1.7 and earlier worked, - // not automatically generating the \\?\ form) - return path - } - - // The extended form begins with \\?\, as in - // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt. - // The extended form disables evaluation of . and .. path - // elements and disables the interpretation of / as equivalent - // to \. The conversion here rewrites / to \ and elides - // . elements as well as trailing or duplicate separators. For - // simplicity it avoids the conversion entirely for relative - // paths or paths containing .. elements. For now, - // \\server\share paths are not converted to - // \\?\UNC\server\share paths because the rules for doing so - // are less well-specified. - if len(path) >= 2 && path[:2] == `\\` { - // Don't canonicalize UNC paths. - return path - } - if !isAbs(path) { - // Relative path - return path - } - - const prefix = `\\?` - - pathbuf := make([]byte, len(prefix)+len(path)+len(`\`)) - copy(pathbuf, prefix) - n := len(path) - r, w := 0, len(prefix) - for r < n { - switch { - case os.IsPathSeparator(path[r]): - // empty block - r++ - case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])): - // /./ - r++ - case r+1 < n && path[r] == '.' && path[r+1] == '.' && (r+2 == n || os.IsPathSeparator(path[r+2])): - // /../ is currently unhandled - return path - default: - pathbuf[w] = '\\' - w++ - for ; r < n && !os.IsPathSeparator(path[r]); r++ { - pathbuf[w] = path[r] - w++ - } - } - } - // A drive's root directory needs a trailing \ - if w == len(`\\?\c:`) { - pathbuf[w] = '\\' - w++ - } - return string(pathbuf[:w]) -} - -func isAbs(path string) (b bool) { - v := volumeName(path) - if v == "" { - return false - } - path = path[len(v):] - if path == "" { - return false - } - return os.IsPathSeparator(path[0]) -} - -func volumeName(path string) (v string) { - if len(path) < 2 { - return "" - } - // with drive letter - c := path[0] - if path[1] == ':' && - ('0' <= c && c <= '9' || 'a' <= c && c <= 'z' || - 'A' <= c && c <= 'Z') { - return path[:2] - } - // is it UNC - if l := len(path); l >= 5 && os.IsPathSeparator(path[0]) && os.IsPathSeparator(path[1]) && - !os.IsPathSeparator(path[2]) && path[2] != '.' { - // first, leading `\\` and next shouldn't be `\`. its server name. - for n := 3; n < l-1; n++ { - // second, next '\' shouldn't be repeated. - if os.IsPathSeparator(path[n]) { - n++ - // third, following something characters. its share name. - if !os.IsPathSeparator(path[n]) { - if path[n] == '.' { - break - } - for ; n < l; n++ { - if os.IsPathSeparator(path[n]) { - break - } - } - return path[:n] - } - break - } - } - } - return "" -} diff --git a/internal/fs/fs_test.go b/internal/fs/fs_test.go deleted file mode 100644 index 9a1c5ef99..000000000 --- a/internal/fs/fs_test.go +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package fs - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" - "runtime" - "sync" - "testing" -) - -var ( - mu sync.Mutex -) - -func TestRenameWithFallback(t *testing.T) { - dir := t.TempDir() - - if err := RenameWithFallback(filepath.Join(dir, "does_not_exists"), filepath.Join(dir, "dst")); err == nil { - t.Fatal("expected an error for non existing file, but got nil") - } - - srcpath := filepath.Join(dir, "src") - - if srcf, err := os.Create(srcpath); err != nil { - t.Fatal(err) - } else { - srcf.Close() - } - - if err := RenameWithFallback(srcpath, filepath.Join(dir, "dst")); err != nil { - t.Fatal(err) - } - - srcpath = filepath.Join(dir, "a") - if err := os.MkdirAll(srcpath, 0o770); err != nil { - t.Fatal(err) - } - - dstpath := filepath.Join(dir, "b") - if err := os.MkdirAll(dstpath, 0o770); err != nil { - t.Fatal(err) - } - - if err := RenameWithFallback(srcpath, dstpath); err == nil { - t.Fatal("expected an error if dst is an existing directory, but got nil") - } -} - -func TestCopyDir(t *testing.T) { - dir := t.TempDir() - - srcdir := filepath.Join(dir, "src") - if err := os.MkdirAll(srcdir, 0o750); err != nil { - t.Fatal(err) - } - - files := []struct { - path string - contents string - fi os.FileInfo - }{ - {path: "myfile", contents: "hello world"}, - {path: filepath.Join("subdir", "file"), contents: "subdir file"}, - } - - // Create structure indicated in 'files' - for i, file := range files { - fn := filepath.Join(srcdir, file.path) - dn := filepath.Dir(fn) - if err := os.MkdirAll(dn, 0o750); err != nil { - t.Fatal(err) - } - - fh, err := os.Create(fn) - if err != nil { - t.Fatal(err) - } - - if _, err = fh.Write([]byte(file.contents)); err != nil { - t.Fatal(err) - } - fh.Close() - - files[i].fi, err = os.Stat(fn) - if err != nil { - t.Fatal(err) - } - } - - destdir := filepath.Join(dir, "dest") - if err := CopyDir(srcdir, destdir); err != nil { - t.Fatal(err) - } - - // Compare copy against structure indicated in 'files' - for _, file := range files { - fn := filepath.Join(srcdir, file.path) - dn := filepath.Dir(fn) - dirOK, err := IsDir(dn) - if err != nil { - t.Fatal(err) - } - if !dirOK { - t.Fatalf("expected %s to be a directory", dn) - } - - got, err := os.ReadFile(fn) - if err != nil { - t.Fatal(err) - } - - if file.contents != string(got) { - t.Fatalf("expected: %s, got: %s", file.contents, string(got)) - } - - gotinfo, err := os.Stat(fn) - if err != nil { - t.Fatal(err) - } - - if file.fi.Mode() != gotinfo.Mode() { - t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v", - file.path, file.fi.Mode(), fn, gotinfo.Mode()) - } - } -} - -func TestCopyDirFail_SrcInaccessible(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. - t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - setupInaccessibleDir(t, func(dir string) error { - srcdir = filepath.Join(dir, "src") - return os.MkdirAll(srcdir, 0o750) - }) - - dir := t.TempDir() - - dstdir = filepath.Join(dir, "dst") - if err := CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyDirFail_DstInaccessible(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. 
- t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - dir := t.TempDir() - - srcdir = filepath.Join(dir, "src") - if err := os.MkdirAll(srcdir, 0o750); err != nil { - t.Fatal(err) - } - - setupInaccessibleDir(t, func(dir string) error { - dstdir = filepath.Join(dir, "dst") - return nil - }) - - if err := CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyDirFail_SrcIsNotDir(t *testing.T) { - var srcdir, dstdir string - - dir := t.TempDir() - - srcdir = filepath.Join(dir, "src") - if _, err := os.Create(srcdir); err != nil { - t.Fatal(err) - } - - dstdir = filepath.Join(dir, "dst") - - err := CopyDir(srcdir, dstdir) - if err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } - - if err != errSrcNotDir { - t.Fatalf("expected %v error for CopyDir(%s, %s), got %s", errSrcNotDir, srcdir, dstdir, err) - } - -} - -func TestCopyDirFail_DstExists(t *testing.T) { - var srcdir, dstdir string - - dir := t.TempDir() - - srcdir = filepath.Join(dir, "src") - if err := os.MkdirAll(srcdir, 0o750); err != nil { - t.Fatal(err) - } - - dstdir = filepath.Join(dir, "dst") - if err := os.MkdirAll(dstdir, 0o750); err != nil { - t.Fatal(err) - } - - err := CopyDir(srcdir, dstdir) - if err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } - - if err != errDstExist { - t.Fatalf("expected %v error for CopyDir(%s, %s), got %s", errDstExist, srcdir, dstdir, err) - } -} - -func TestCopyDirFailOpen(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. os.Chmod(..., 0o222) below is not - // enough for the file to be readonly, and os.Chmod(..., - // 0000) returns an invalid argument error. Skipping - // this this until a compatible implementation is - // provided. 
- t.Skip("skipping on windows") - } - - var srcdir, dstdir string - - dir := t.TempDir() - - srcdir = filepath.Join(dir, "src") - if err := os.MkdirAll(srcdir, 0o750); err != nil { - t.Fatal(err) - } - - srcfn := filepath.Join(srcdir, "file") - srcf, err := os.Create(srcfn) - if err != nil { - t.Fatal(err) - } - srcf.Close() - - // setup source file so that it cannot be read - if err = os.Chmod(srcfn, 0o220); err != nil { - t.Fatal(err) - } - - dstdir = filepath.Join(dir, "dst") - - if err = CopyDir(srcdir, dstdir); err == nil { - t.Fatalf("expected error for CopyDir(%s, %s), got none", srcdir, dstdir) - } -} - -func TestCopyFile(t *testing.T) { - dir := t.TempDir() - - srcf, err := os.Create(filepath.Join(dir, "srcfile")) - if err != nil { - t.Fatal(err) - } - - want := "hello world" - if _, err := srcf.Write([]byte(want)); err != nil { - t.Fatal(err) - } - srcf.Close() - - destf := filepath.Join(dir, "destf") - if err := copyFile(srcf.Name(), destf); err != nil { - t.Fatal(err) - } - - got, err := os.ReadFile(destf) - if err != nil { - t.Fatal(err) - } - - if want != string(got) { - t.Fatalf("expected: %s, got: %s", want, string(got)) - } - - wantinfo, err := os.Stat(srcf.Name()) - if err != nil { - t.Fatal(err) - } - - gotinfo, err := os.Stat(destf) - if err != nil { - t.Fatal(err) - } - - if wantinfo.Mode() != gotinfo.Mode() { - t.Fatalf("expected %s: %#v\n to be the same mode as %s: %#v", srcf.Name(), wantinfo.Mode(), destf, gotinfo.Mode()) - } -} - -func TestCopyFileSymlink(t *testing.T) { - dir := t.TempDir() - defer cleanUpDir(dir) - - testcases := map[string]string{ - filepath.Join("./testdata/symlinks/file-symlink"): filepath.Join(dir, "dst-file"), - filepath.Join("./testdata/symlinks/windows-file-symlink"): filepath.Join(dir, "windows-dst-file"), - filepath.Join("./testdata/symlinks/invalid-symlink"): filepath.Join(dir, "invalid-symlink"), - } - - for symlink, dst := range testcases { - t.Run(symlink, func(t *testing.T) { - var err error - if err = copyFile(symlink, dst); err != nil { - t.Fatalf("failed to copy symlink: %s", err) - } - - var want, got string - - if runtime.GOOS == "windows" { - // Creating symlinks on Windows require an additional permission - // regular users aren't granted usually. So we copy the file - // content as a fall back instead of creating a real symlink. - srcb, err := os.ReadFile(symlink) - if err != nil { - t.Fatalf("%+v", err) - } - dstb, err := os.ReadFile(dst) - if err != nil { - t.Fatalf("%+v", err) - } - - want = string(srcb) - got = string(dstb) - } else { - want, err = os.Readlink(symlink) - if err != nil { - t.Fatalf("%+v", err) - } - - got, err = os.Readlink(dst) - if err != nil { - t.Fatalf("could not resolve symlink: %s", err) - } - } - - if want != got { - t.Fatalf("resolved path is incorrect. expected %s, got %s", want, got) - } - }) - } -} - -func TestCopyFileLongFilePath(t *testing.T) { - if runtime.GOOS != "windows" { - // We want to ensure the temporary fix actually fixes the issue with - // os.Chmod and long file paths. This is only applicable on Windows. - t.Skip("skipping on non-windows") - } - - dir := t.TempDir() - - // Create a directory with a long-enough path name to cause the bug in #774. 
- dirName := "" - for len(dir+string(os.PathSeparator)+dirName) <= 300 { - dirName += "directory" - } - - fullPath := filepath.Join(dir, dirName, string(os.PathSeparator)) - if err := os.MkdirAll(fullPath, 0o750); err != nil && !os.IsExist(err) { - t.Fatalf("%+v", fmt.Errorf("unable to create temp directory: %s", fullPath)) - } - - err := os.WriteFile(fullPath+"src", []byte(nil), 0o640) - if err != nil { - t.Fatalf("%+v", err) - } - - err = copyFile(fullPath+"src", fullPath+"dst") - if err != nil { - t.Fatalf("unexpected error while copying file: %v", err) - } -} - -// C:\Users\appveyor\AppData\Local\Temp\1\gotest639065787\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890\dir4567890 - -func TestCopyFileFail(t *testing.T) { - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in - // Microsoft Windows. Skipping this this until a - // compatible implementation is provided. - t.Skip("skipping on windows") - } - - dir := t.TempDir() - - srcf, err := os.Create(filepath.Join(dir, "srcfile")) - if err != nil { - t.Fatal(err) - } - srcf.Close() - - var dstdir string - - setupInaccessibleDir(t, func(dir string) error { - dstdir = filepath.Join(dir, "dir") - return os.Mkdir(dstdir, 0o770) - }) - - fn := filepath.Join(dstdir, "file") - if err := copyFile(srcf.Name(), fn); err == nil { - t.Fatalf("expected error for %s, got none", fn) - } -} - -// setupInaccessibleDir creates a temporary location with a single -// directory in it, in such a way that that directory is not accessible -// after this function returns. -// -// op is called with the directory as argument, so that it can create -// files or other test artifacts. -// -// If setupInaccessibleDir fails in its preparation, or op fails, t.Fatal -// will be invoked. -func setupInaccessibleDir(t *testing.T, op func(dir string) error) { - dir, err := os.MkdirTemp("", "dep") - if err != nil { - t.Fatal(err) - } - - subdir := filepath.Join(dir, "dir") - - t.Cleanup(func() { - if err := os.Chmod(subdir, 0o770); err != nil { - t.Error(err) - } - }) - - if err := os.Mkdir(subdir, 0o770); err != nil { - t.Fatal(err) - } - - if err := op(subdir); err != nil { - t.Fatal(err) - } - - if err := os.Chmod(subdir, 0o660); err != nil { - t.Fatal(err) - } -} - -func TestIsDir(t *testing.T) { - wd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - var dn string - - setupInaccessibleDir(t, func(dir string) error { - dn = filepath.Join(dir, "dir") - return os.Mkdir(dn, 0o770) - }) - - tests := map[string]struct { - exists bool - err bool - }{ - wd: {true, false}, - filepath.Join(wd, "testdata"): {true, false}, - filepath.Join(wd, "main.go"): {false, true}, - filepath.Join(wd, "this_file_does_not_exist.thing"): {false, true}, - dn: {false, true}, - } - - if runtime.GOOS == "windows" { - // This test doesn't work on Microsoft Windows because - // of the differences in how file permissions are - // implemented. For this to work, the directory where - // the directory exists should be inaccessible. 
- delete(tests, dn) - } - - for f, want := range tests { - got, err := IsDir(f) - if err != nil && !want.err { - t.Fatalf("expected no error, got %v", err) - } - - if got != want.exists { - t.Fatalf("expected %t for %s, got %t", want.exists, f, got) - } - } -} - -func TestIsSymlink(t *testing.T) { - dir := t.TempDir() - - dirPath := filepath.Join(dir, "directory") - if err := os.MkdirAll(dirPath, 0o770); err != nil { - t.Fatal(err) - } - - filePath := filepath.Join(dir, "file") - f, err := os.Create(filePath) - if err != nil { - t.Fatal(err) - } - f.Close() - - dirSymlink := filepath.Join(dir, "dirSymlink") - fileSymlink := filepath.Join(dir, "fileSymlink") - - if err = os.Symlink(dirPath, dirSymlink); err != nil { - t.Fatal(err) - } - if err = os.Symlink(filePath, fileSymlink); err != nil { - t.Fatal(err) - } - - var ( - inaccessibleFile string - inaccessibleSymlink string - ) - - setupInaccessibleDir(t, func(dir string) error { - inaccessibleFile = filepath.Join(dir, "file") - if fh, err := os.Create(inaccessibleFile); err != nil { - return err - } else if err = fh.Close(); err != nil { - return err - } - - inaccessibleSymlink = filepath.Join(dir, "symlink") - return os.Symlink(inaccessibleFile, inaccessibleSymlink) - }) - - tests := map[string]struct{ expected, err bool }{ - dirPath: {false, false}, - filePath: {false, false}, - dirSymlink: {true, false}, - fileSymlink: {true, false}, - inaccessibleFile: {false, true}, - inaccessibleSymlink: {false, true}, - } - - if runtime.GOOS == "windows" { - // XXX: setting permissions works differently in Windows. Skipping - // these cases until a compatible implementation is provided. - delete(tests, inaccessibleFile) - delete(tests, inaccessibleSymlink) - } - - for path, want := range tests { - got, err := IsSymlink(path) - if err != nil { - if !want.err { - t.Errorf("expected no error, got %v", err) - } - } - - if got != want.expected { - t.Errorf("expected %t for %s, got %t", want.expected, path, got) - } - } -} - -func cleanUpDir(dir string) { - if runtime.GOOS == "windows" { - mu.Lock() - exec.Command(`taskkill`, `/F`, `/IM`, `git.exe`).Run() - mu.Unlock() - } - if dir != "" { - os.RemoveAll(dir) - } -} diff --git a/internal/fs/rename.go b/internal/fs/rename.go deleted file mode 100644 index bad1f4778..000000000 --- a/internal/fs/rename.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !windows -// +build !windows - -package fs - -import ( - "fmt" - "os" - "syscall" -) - -// renameFallback attempts to determine the appropriate fallback to failed rename -// operation depending on the resulting error. -func renameFallback(err error, src, dst string) error { - // Rename may fail if src and dst are on different devices; fall back to - // copy if we detect that case. syscall.EXDEV is the common name for the - // cross device link error which has varying output text across different - // operating systems. - terr, ok := err.(*os.LinkError) - if !ok { - return err - } else if terr.Err != syscall.EXDEV { - return fmt.Errorf("link error: cannot rename %s to %s: %w", src, dst, terr) - } - - return renameByCopy(src, dst) -} diff --git a/internal/fs/rename_windows.go b/internal/fs/rename_windows.go deleted file mode 100644 index fa9a0b4d9..000000000 --- a/internal/fs/rename_windows.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -package fs - -import ( - "fmt" - "os" - "syscall" -) - -// renameFallback attempts to determine the appropriate fallback to failed rename -// operation depending on the resulting error. -func renameFallback(err error, src, dst string) error { - // Rename may fail if src and dst are on different devices; fall back to - // copy if we detect that case. syscall.EXDEV is the common name for the - // cross device link error which has varying output text across different - // operating systems. - terr, ok := err.(*os.LinkError) - if !ok { - return err - } - - if terr.Err != syscall.EXDEV { - // In windows it can drop down to an operating system call that - // returns an operating system error with a different number and - // message. Checking for that as a fall back. - noerr, ok := terr.Err.(syscall.Errno) - - // 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error. - // See https://msdn.microsoft.com/en-us/library/cc231199.aspx - if ok && noerr != 0x11 { - return fmt.Errorf("link error: cannot rename %s to %s: %w", src, dst, terr) - } - } - - return renameByCopy(src, dst) -} diff --git a/internal/fs/testdata/symlinks/dir-symlink b/internal/fs/testdata/symlinks/dir-symlink deleted file mode 120000 index 777ebd014..000000000 --- a/internal/fs/testdata/symlinks/dir-symlink +++ /dev/null @@ -1 +0,0 @@ -../../testdata \ No newline at end of file diff --git a/internal/fs/testdata/symlinks/file-symlink b/internal/fs/testdata/symlinks/file-symlink deleted file mode 120000 index 4c52274de..000000000 --- a/internal/fs/testdata/symlinks/file-symlink +++ /dev/null @@ -1 +0,0 @@ -../test.file \ No newline at end of file diff --git a/internal/fs/testdata/symlinks/invalid-symlink b/internal/fs/testdata/symlinks/invalid-symlink deleted file mode 120000 index 0edf4f301..000000000 --- a/internal/fs/testdata/symlinks/invalid-symlink +++ /dev/null @@ -1 +0,0 @@ -/non/existing/file \ No newline at end of file diff --git a/internal/fs/testdata/symlinks/windows-file-symlink b/internal/fs/testdata/symlinks/windows-file-symlink deleted file mode 120000 index af1d6c8f5..000000000 --- a/internal/fs/testdata/symlinks/windows-file-symlink +++ /dev/null @@ -1 +0,0 @@ -C:/Users/ibrahim/go/src/github.com/golang/dep/internal/fs/testdata/test.file \ No newline at end of file diff --git a/internal/fs/testdata/test.file b/internal/fs/testdata/test.file deleted file mode 100644 index e69de29bb..000000000 diff --git a/internal/helm/chart/builder.go b/internal/helm/chart/builder.go index b56c8c9a3..6ac896e78 100644 --- a/internal/helm/chart/builder.go +++ b/internal/helm/chart/builder.go @@ -24,10 +24,10 @@ import ( "regexp" "strings" + sourcefs "github.com/fluxcd/pkg/oci" helmchart "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/chartutil" - "github.com/fluxcd/source-controller/internal/fs" "github.com/fluxcd/source-controller/internal/oci" ) @@ -219,7 +219,7 @@ func packageToPath(chart *helmchart.Chart, out string) error { if err != nil { return fmt.Errorf("failed to package chart: %w", err) } - if err = fs.RenameWithFallback(p, out); err != nil { + if err = sourcefs.RenameWithFallback(p, out); err != nil { return fmt.Errorf("failed to write chart to file: %w", err) } return nil diff --git a/internal/helm/chart/builder_local_test.go b/internal/helm/chart/builder_local_test.go index 6434b5095..4b26e1419 100644 --- 
a/internal/helm/chart/builder_local_test.go +++ b/internal/helm/chart/builder_local_test.go @@ -93,7 +93,7 @@ func TestLocalBuilder_Build(t *testing.T) { name: "invalid version metadata", reference: LocalReference{Path: "../testdata/charts/helmchart"}, buildOpts: BuildOptions{VersionMetadata: "^"}, - wantErr: "Invalid Metadata string", + wantErr: "invalid metadata string", }, { name: "with version metadata", diff --git a/internal/helm/chart/builder_remote.go b/internal/helm/chart/builder_remote.go index 1010d8cc1..2cfdf81b4 100644 --- a/internal/helm/chart/builder_remote.go +++ b/internal/helm/chart/builder_remote.go @@ -30,9 +30,9 @@ import ( "helm.sh/helm/v3/pkg/repo" "sigs.k8s.io/yaml" + sourcefs "github.com/fluxcd/pkg/oci" "github.com/fluxcd/pkg/runtime/transform" - "github.com/fluxcd/source-controller/internal/fs" "github.com/fluxcd/source-controller/internal/helm/chart/secureloader" "github.com/fluxcd/source-controller/internal/helm/repository" "github.com/fluxcd/source-controller/internal/oci" @@ -290,7 +290,7 @@ func validatePackageAndWriteToPath(reader io.Reader, out string) error { if err = meta.Validate(); err != nil { return fmt.Errorf("failed to validate metadata of written chart: %w", err) } - if err = fs.RenameWithFallback(tmpFile.Name(), out); err != nil { + if err = sourcefs.RenameWithFallback(tmpFile.Name(), out); err != nil { return fmt.Errorf("failed to write chart to file: %w", err) } return nil diff --git a/internal/helm/chart/builder_remote_test.go b/internal/helm/chart/builder_remote_test.go index ebe31ae3a..7994fa5ee 100644 --- a/internal/helm/chart/builder_remote_test.go +++ b/internal/helm/chart/builder_remote_test.go @@ -152,7 +152,7 @@ entries: reference: RemoteReference{Name: "grafana"}, repository: mockRepo(), buildOpts: BuildOptions{VersionMetadata: "^"}, - wantErr: "Invalid Metadata string", + wantErr: "invalid metadata string", }, { name: "with version metadata", @@ -300,7 +300,7 @@ func TestRemoteBuilder_BuildFromOCIChartRepository(t *testing.T) { reference: RemoteReference{Name: "grafana"}, repository: mockRepo(), buildOpts: BuildOptions{VersionMetadata: "^"}, - wantErr: "Invalid Metadata string", + wantErr: "invalid metadata string", }, { name: "with version metadata", diff --git a/internal/helm/getter/client_opts.go b/internal/helm/getter/client_opts.go index b586b41b5..e40811b39 100644 --- a/internal/helm/getter/client_opts.go +++ b/internal/helm/getter/client_opts.go @@ -24,7 +24,6 @@ import ( "os" "path" - "github.com/fluxcd/pkg/oci" "github.com/google/go-containerregistry/pkg/authn" helmgetter "helm.sh/helm/v3/pkg/getter" helmreg "helm.sh/helm/v3/pkg/registry" @@ -32,11 +31,11 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/fluxcd/pkg/runtime/secrets" + sourcev1 "github.com/fluxcd/source-controller/api/v1" - sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/internal/helm/registry" soci "github.com/fluxcd/source-controller/internal/oci" - stls "github.com/fluxcd/source-controller/internal/tls" ) const ( @@ -71,112 +70,163 @@ func (o ClientOpts) MustLoginToRegistry() bool { // A temporary directory is created to store the certs files if needed and its path is returned along with the options object. 
It is the // caller's responsibility to clean up the directory. func GetClientOpts(ctx context.Context, c client.Client, obj *sourcev1.HelmRepository, url string) (*ClientOpts, string, error) { - hrOpts := &ClientOpts{ + // This function configures authentication for Helm repositories based on the provided secrets: + // - CertSecretRef: TLS client certificates (always takes priority) + // - SecretRef: Can contain Basic Auth or TLS certificates (deprecated) + // For OCI repositories, additional registry-specific authentication is configured (including Docker config) + opts := &ClientOpts{ GetterOpts: []helmgetter.Option{ helmgetter.WithURL(url), helmgetter.WithTimeout(obj.GetTimeout()), helmgetter.WithPassCredentialsAll(obj.Spec.PassCredentials), }, + Insecure: obj.Spec.Insecure, } - ociRepo := obj.Spec.Type == sourcev1.HelmRepositoryTypeOCI - var ( - certSecret *corev1.Secret - tlsBytes *stls.TLSBytes - certFile string - keyFile string - caFile string - dir string - err error - ) - // Check `.spec.certSecretRef` first for any TLS auth data. + // Process secrets and configure authentication + deprecatedTLS, certSecret, authSecret, err := configureAuthentication(ctx, c, obj, opts, url) + if err != nil { + return nil, "", err + } + + // Setup OCI registry specific configurations if needed + var tempCertDir string + if obj.Spec.Type == sourcev1.HelmRepositoryTypeOCI { + tempCertDir, err = configureOCIRegistryWithSecrets(ctx, obj, opts, url, certSecret, authSecret) + if err != nil { + return nil, "", err + } + } + + var deprecatedErr error + if deprecatedTLS { + deprecatedErr = ErrDeprecatedTLSConfig + } + + return opts, tempCertDir, deprecatedErr +} + +// configureAuthentication processes all secret references and sets up authentication. +// Returns (deprecatedTLS, certSecret, authSecret, error) where: +// - deprecatedTLS: true if TLS config comes from SecretRef (deprecated pattern) +// - certSecret: the secret from CertSecretRef (nil if not specified) +// - authSecret: the secret from SecretRef (nil if not specified) +func configureAuthentication(ctx context.Context, c client.Client, obj *sourcev1.HelmRepository, opts *ClientOpts, url string) (bool, *corev1.Secret, *corev1.Secret, error) { + var deprecatedTLS bool + var certSecret, authSecret *corev1.Secret + if obj.Spec.CertSecretRef != nil { - certSecret, err = fetchSecret(ctx, c, obj.Spec.CertSecretRef.Name, obj.GetNamespace()) + secret, err := fetchSecret(ctx, c, obj.Spec.CertSecretRef.Name, obj.GetNamespace()) if err != nil { - return nil, "", fmt.Errorf("failed to get TLS authentication secret '%s/%s': %w", obj.GetNamespace(), obj.Spec.CertSecretRef.Name, err) + secretRef := types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.Spec.CertSecretRef.Name} + return false, nil, nil, fmt.Errorf("failed to get TLS authentication secret '%s': %w", secretRef, err) } + certSecret = secret - hrOpts.TlsConfig, tlsBytes, err = stls.KubeTLSClientConfigFromSecret(*certSecret, url) + // NOTE: Use WithSystemCertPool to maintain backward compatibility with the existing + // extend approach (system CAs + user CA) rather than the default replace approach (user CA only). + // This ensures HelmRepository continues to work with both system and user-provided CA certificates. + var tlsOpts = []secrets.TLSConfigOption{secrets.WithSystemCertPool()} + tlsConfig, err := secrets.TLSConfigFromSecret(ctx, secret, tlsOpts...) 
if err != nil { - return nil, "", fmt.Errorf("failed to construct Helm client's TLS config: %w", err) + return false, nil, nil, fmt.Errorf("failed to construct Helm client's TLS config: %w", err) } + opts.TlsConfig = tlsConfig } - var authSecret *corev1.Secret - var deprecatedTLSConfig bool + // Extract all authentication methods from SecretRef. + // This secret may contain multiple auth types (Basic Auth, TLS). if obj.Spec.SecretRef != nil { - authSecret, err = fetchSecret(ctx, c, obj.Spec.SecretRef.Name, obj.GetNamespace()) + secret, err := fetchSecret(ctx, c, obj.Spec.SecretRef.Name, obj.GetNamespace()) if err != nil { - return nil, "", fmt.Errorf("failed to get authentication secret '%s/%s': %w", obj.GetNamespace(), obj.Spec.SecretRef.Name, err) + secretRef := types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.Spec.SecretRef.Name} + return false, nil, nil, fmt.Errorf("failed to get authentication secret '%s': %w", secretRef, err) } + authSecret = secret - // Construct actual Helm client options. - opts, err := GetterOptionsFromSecret(*authSecret) + // NOTE: Use WithTLSSystemCertPool to maintain backward compatibility with the existing + // extend approach (system CAs + user CA) rather than the default replace approach (user CA only). + // This ensures HelmRepository auth methods work with both system and user-provided CA certificates. + var authOpts = []secrets.AuthMethodsOption{ + secrets.WithTLSSystemCertPool(), + } + methods, err := secrets.AuthMethodsFromSecret(ctx, secret, authOpts...) if err != nil { - return nil, "", fmt.Errorf("failed to configure Helm client: %w", err) + return false, nil, nil, fmt.Errorf("failed to detect authentication methods: %w", err) } - hrOpts.GetterOpts = append(hrOpts.GetterOpts, opts...) - // If the TLS config is nil, i.e. one couldn't be constructed using - // `.spec.certSecretRef`, then try to use `.spec.secretRef`. - if hrOpts.TlsConfig == nil && !ociRepo { - hrOpts.TlsConfig, tlsBytes, err = stls.LegacyTLSClientConfigFromSecret(*authSecret, url) - if err != nil { - return nil, "", fmt.Errorf("failed to construct Helm client's TLS config: %w", err) - } - // Constructing a TLS config using the auth secret is deprecated behavior. 
- if hrOpts.TlsConfig != nil { - deprecatedTLSConfig = true - } + if methods.HasBasicAuth() { + opts.GetterOpts = append(opts.GetterOpts, + helmgetter.WithBasicAuth(methods.Basic.Username, methods.Basic.Password)) } - if ociRepo { - hrOpts.Keychain, err = registry.LoginOptionFromSecret(url, *authSecret) - if err != nil { - return nil, "", fmt.Errorf("failed to configure login options: %w", err) - } + // Use TLS from SecretRef only if CertSecretRef is not specified (CertSecretRef takes priority) + if opts.TlsConfig == nil && methods.HasTLS() { + opts.TlsConfig = methods.TLS + deprecatedTLS = true } - } else if obj.Spec.Provider != sourcev1beta2.GenericOCIProvider && obj.Spec.Type == sourcev1.HelmRepositoryTypeOCI && ociRepo { - authenticator, authErr := soci.OIDCAuth(ctx, obj.Spec.URL, obj.Spec.Provider, nil) - if authErr != nil && !errors.Is(authErr, oci.ErrUnconfiguredProvider) { - return nil, "", fmt.Errorf("failed to get credential from '%s': %w", obj.Spec.Provider, authErr) + } + + return deprecatedTLS, certSecret, authSecret, nil +} + +// configureOCIRegistryWithSecrets sets up OCI-specific configurations using pre-fetched secrets +func configureOCIRegistryWithSecrets(ctx context.Context, obj *sourcev1.HelmRepository, opts *ClientOpts, url string, certSecret, authSecret *corev1.Secret) (string, error) { + // Configure OCI authentication from authSecret if available + if authSecret != nil { + keychain, err := registry.LoginOptionFromSecret(url, *authSecret) + if err != nil { + return "", fmt.Errorf("failed to configure login options: %w", err) } - if authenticator != nil { - hrOpts.Authenticator = authenticator + opts.Keychain = keychain + } + + // Handle OCI provider authentication if no SecretRef + if obj.Spec.SecretRef == nil && obj.Spec.Provider != "" && obj.Spec.Provider != sourcev1.GenericOCIProvider { + authenticator, err := soci.OIDCAuth(ctx, url, obj.Spec.Provider) + if err != nil { + return "", fmt.Errorf("failed to get credential from '%s': %w", obj.Spec.Provider, err) } + opts.Authenticator = authenticator } - if ociRepo { - // Persist the certs files to the path if needed. 
- if tlsBytes != nil { - dir, err = os.MkdirTemp("", "helm-repo-oci-certs") - if err != nil { - return nil, "", fmt.Errorf("cannot create temporary directory: %w", err) - } - certFile, keyFile, caFile, err = storeTLSCertificateFiles(tlsBytes, dir) - if err != nil { - return nil, "", fmt.Errorf("cannot write certs files to path: %w", err) - } + // Setup registry login options + loginOpt, err := registry.NewLoginOption(opts.Authenticator, opts.Keychain, url) + if err != nil { + return "", err + } + + if loginOpt != nil { + opts.RegLoginOpts = []helmreg.LoginOption{loginOpt, helmreg.LoginOptInsecure(obj.Spec.Insecure)} + } + + // Handle TLS certificate files for OCI + var tempCertDir string + if opts.TlsConfig != nil { + tempCertDir, err = os.MkdirTemp("", "helm-repo-oci-certs") + if err != nil { + return "", fmt.Errorf("cannot create temporary directory: %w", err) + } + + var tlsSecret *corev1.Secret + if certSecret != nil { + tlsSecret = certSecret + } else if authSecret != nil { + tlsSecret = authSecret } - loginOpt, err := registry.NewLoginOption(hrOpts.Authenticator, hrOpts.Keychain, url) + + certFile, keyFile, caFile, err := storeTLSCertificateFilesForOCI(ctx, tlsSecret, nil, tempCertDir) if err != nil { - return nil, "", err + return "", fmt.Errorf("cannot write certs files to path: %w", err) } - if loginOpt != nil { - hrOpts.RegLoginOpts = []helmreg.LoginOption{loginOpt, helmreg.LoginOptInsecure(obj.Spec.Insecure)} - tlsLoginOpt := registry.TLSLoginOption(certFile, keyFile, caFile) - if tlsLoginOpt != nil { - hrOpts.RegLoginOpts = append(hrOpts.RegLoginOpts, tlsLoginOpt) - } + + tlsLoginOpt := registry.TLSLoginOption(certFile, keyFile, caFile) + if tlsLoginOpt != nil { + opts.RegLoginOpts = append(opts.RegLoginOpts, tlsLoginOpt) } } - if deprecatedTLSConfig { - err = ErrDeprecatedTLSConfig - } - - hrOpts.Insecure = obj.Spec.Insecure - return hrOpts, dir, err + return tempCertDir, nil } func fetchSecret(ctx context.Context, c client.Client, name, namespace string) (*corev1.Secret, error) { @@ -191,30 +241,48 @@ func fetchSecret(ctx context.Context, c client.Client, name, namespace string) ( return &secret, nil } -// storeTLSCertificateFiles writes the certs files to the given path and returns the files paths. -func storeTLSCertificateFiles(tlsBytes *stls.TLSBytes, path string) (string, string, string, error) { +// storeTLSCertificateFilesForOCI writes TLS certificate data from secrets to files for OCI registry authentication. +// Helm OCI registry client requires certificate file paths rather than in-memory data, +// so we need to temporarily write the certificate data to disk. +// Returns paths to the written cert, key, and CA files (any of which may be empty if not present). 
+func storeTLSCertificateFilesForOCI(ctx context.Context, certSecret, authSecret *corev1.Secret, path string) (string, string, string, error) { var ( certFile string keyFile string caFile string err error ) - if len(tlsBytes.CertBytes) > 0 && len(tlsBytes.KeyBytes) > 0 { - certFile, err = writeToFile(tlsBytes.CertBytes, certFileName, path) - if err != nil { - return "", "", "", err - } - keyFile, err = writeToFile(tlsBytes.KeyBytes, keyFileName, path) - if err != nil { - return "", "", "", err - } + + // Try to get TLS data from certSecret first, then authSecret + var tlsSecret *corev1.Secret + if certSecret != nil { + tlsSecret = certSecret + } else if authSecret != nil { + tlsSecret = authSecret } - if len(tlsBytes.CABytes) > 0 { - caFile, err = writeToFile(tlsBytes.CABytes, caFileName, path) - if err != nil { - return "", "", "", err + + if tlsSecret != nil { + if certData, exists := tlsSecret.Data[secrets.KeyTLSCert]; exists { + if keyData, keyExists := tlsSecret.Data[secrets.KeyTLSPrivateKey]; keyExists { + certFile, err = writeToFile(certData, certFileName, path) + if err != nil { + return "", "", "", err + } + keyFile, err = writeToFile(keyData, keyFileName, path) + if err != nil { + return "", "", "", err + } + } + } + + if caData, exists := tlsSecret.Data[secrets.KeyCACert]; exists { + caFile, err = writeToFile(caData, caFileName, path) + if err != nil { + return "", "", "", err + } } } + return certFile, keyFile, caFile, nil } diff --git a/internal/helm/getter/client_opts_test.go b/internal/helm/getter/client_opts_test.go index b8bf15f28..bf40e7f86 100644 --- a/internal/helm/getter/client_opts_test.go +++ b/internal/helm/getter/client_opts_test.go @@ -19,6 +19,7 @@ package getter import ( "context" "os" + "strings" "testing" "time" @@ -64,7 +65,6 @@ func TestGetClientOpts(t *testing.T) { Data: map[string][]byte{ "username": []byte("user"), "password": []byte("pass"), - "caFile": []byte("invalid"), }, }, afterFunc: func(t *WithT, hcOpts *ClientOpts) { @@ -186,6 +186,7 @@ func TestGetClientOpts_registryTLSLoginOption(t *testing.T) { certSecret *corev1.Secret authSecret *corev1.Secret loginOptsN int + wantErrMsg string }{ { name: "with valid caFile", @@ -225,7 +226,7 @@ func TestGetClientOpts_registryTLSLoginOption(t *testing.T) { "password": []byte("pass"), }, }, - loginOptsN: 2, + wantErrMsg: "must contain either 'ca.crt' or both 'tls.crt' and 'tls.key'", }, { name: "without cert secret", @@ -271,6 +272,17 @@ func TestGetClientOpts_registryTLSLoginOption(t *testing.T) { c := clientBuilder.Build() clientOpts, tmpDir, err := GetClientOpts(context.TODO(), c, helmRepo, "https://ghcr.io/dummy") + if tt.wantErrMsg != "" { + if err == nil { + t.Errorf("GetClientOpts() expected error but got none") + return + } + if !strings.Contains(err.Error(), tt.wantErrMsg) { + t.Errorf("GetClientOpts() expected error containing %q but got %v", tt.wantErrMsg, err) + return + } + return + } if err != nil { t.Errorf("GetClientOpts() error = %v", err) return diff --git a/internal/helm/getter/getter.go b/internal/helm/getter/getter.go deleted file mode 100644 index 18661da16..000000000 --- a/internal/helm/getter/getter.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package getter - -import ( - "fmt" - - "helm.sh/helm/v3/pkg/getter" - corev1 "k8s.io/api/core/v1" -) - -// GetterOptionsFromSecret constructs a getter.Option slice for the given secret. -// It returns the slice, or an error. -func GetterOptionsFromSecret(secret corev1.Secret) ([]getter.Option, error) { - var opts []getter.Option - basicAuth, err := basicAuthFromSecret(secret) - if err != nil { - return opts, err - } - if basicAuth != nil { - opts = append(opts, basicAuth) - } - return opts, nil -} - -// basicAuthFromSecret attempts to construct a basic auth getter.Option for the -// given v1.Secret and returns the result. -// -// Secrets with no username AND password are ignored, if only one is defined it -// returns an error. -func basicAuthFromSecret(secret corev1.Secret) (getter.Option, error) { - username, password := string(secret.Data["username"]), string(secret.Data["password"]) - switch { - case username == "" && password == "": - return nil, nil - case username == "" || password == "": - return nil, fmt.Errorf("invalid '%s' secret data: required fields 'username' and 'password'", secret.Name) - } - return getter.WithBasicAuth(username, password), nil -} diff --git a/internal/helm/getter/getter_test.go b/internal/helm/getter/getter_test.go deleted file mode 100644 index cffe0064f..000000000 --- a/internal/helm/getter/getter_test.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2020 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package getter - -import ( - "testing" - - corev1 "k8s.io/api/core/v1" -) - -var ( - basicAuthSecretFixture = corev1.Secret{ - Data: map[string][]byte{ - "username": []byte("user"), - "password": []byte("password"), - }, - } -) - -func TestGetterOptionsFromSecret(t *testing.T) { - tests := []struct { - name string - secrets []corev1.Secret - }{ - {"basic auth", []corev1.Secret{basicAuthSecretFixture}}, - {"empty", []corev1.Secret{}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - secret := corev1.Secret{Data: map[string][]byte{}} - for _, s := range tt.secrets { - for k, v := range s.Data { - secret.Data[k] = v - } - } - - got, err := GetterOptionsFromSecret(secret) - if err != nil { - t.Errorf("ClientOptionsFromSecret() error = %v", err) - return - } - if len(got) != len(tt.secrets) { - t.Errorf("ClientOptionsFromSecret() options = %v, expected = %v", got, len(tt.secrets)) - } - }) - } -} - -func Test_basicAuthFromSecret(t *testing.T) { - tests := []struct { - name string - secret corev1.Secret - modify func(secret *corev1.Secret) - wantErr bool - wantNil bool - }{ - {"username and password", basicAuthSecretFixture, nil, false, false}, - {"without username", basicAuthSecretFixture, func(s *corev1.Secret) { delete(s.Data, "username") }, true, true}, - {"without password", basicAuthSecretFixture, func(s *corev1.Secret) { delete(s.Data, "password") }, true, true}, - {"empty", corev1.Secret{}, nil, false, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - secret := tt.secret.DeepCopy() - if tt.modify != nil { - tt.modify(secret) - } - got, err := basicAuthFromSecret(*secret) - if (err != nil) != tt.wantErr { - t.Errorf("BasicAuthFromSecret() error = %v, wantErr %v", err, tt.wantErr) - return - } - if tt.wantNil && got != nil { - t.Error("BasicAuthFromSecret() != nil") - return - } - }) - } -} diff --git a/internal/helm/repository/chart_repository.go b/internal/helm/repository/chart_repository.go index 9837224f4..e8030ec7b 100644 --- a/internal/helm/repository/chart_repository.go +++ b/internal/helm/repository/chart_repository.go @@ -40,9 +40,9 @@ import ( "github.com/fluxcd/pkg/version" + "github.com/fluxcd/pkg/http/transport" "github.com/fluxcd/source-controller/internal/helm" "github.com/fluxcd/source-controller/internal/oci" - "github.com/fluxcd/source-controller/internal/transport" ) var ( diff --git a/internal/helm/repository/oci_chart_repository.go b/internal/helm/repository/oci_chart_repository.go index c858befff..2bed964a2 100644 --- a/internal/helm/repository/oci_chart_repository.go +++ b/internal/helm/repository/oci_chart_repository.go @@ -36,9 +36,9 @@ import ( "github.com/Masterminds/semver/v3" "github.com/google/go-containerregistry/pkg/name" + "github.com/fluxcd/pkg/http/transport" "github.com/fluxcd/pkg/version" "github.com/fluxcd/source-controller/internal/oci" - "github.com/fluxcd/source-controller/internal/transport" ) // RegistryClient is an interface for interacting with OCI registries diff --git a/internal/index/digest_test.go b/internal/index/digest_test.go index 8afc4fd09..531bb9329 100644 --- a/internal/index/digest_test.go +++ b/internal/index/digest_test.go @@ -49,6 +49,13 @@ func TestWithIndex(t *testing.T) { g.Expect(d.digests).To(BeEmpty()) }) + + t.Run("handles nil index", func(t *testing.T) { + g := NewWithT(t) + d := &Digester{} + WithIndex(nil)(d) 
+ g.Expect(d.index).To(BeNil()) + }) } func TestNewDigester(t *testing.T) { @@ -107,6 +114,13 @@ func TestDigester_Add(t *testing.T) { g.Expect(d.digests).To(BeEmpty()) }) + + t.Run("adds empty key and value", func(t *testing.T) { + g := NewWithT(t) + d := NewDigester() + d.Add("", "") + g.Expect(d.index).To(HaveKeyWithValue("", "")) + }) } func TestDigester_Delete(t *testing.T) { @@ -138,6 +152,14 @@ func TestDigester_Delete(t *testing.T) { d.Delete("foo") g.Expect(d.digests).To(BeEmpty()) }) + + t.Run("deletes non-existent key without error", func(t *testing.T) { + g := NewWithT(t) + d := NewDigester() + d.Delete("non-existent") + g.Expect(d.index).To(BeEmpty()) + g.Expect(d.digests).To(BeEmpty()) + }) } func TestDigester_Get(t *testing.T) { @@ -161,17 +183,26 @@ func TestDigester_Has(t *testing.T) { } func TestDigester_Index(t *testing.T) { - g := NewWithT(t) + t.Run("returns a copy of the index", func(t *testing.T) { + g := NewWithT(t) - i := map[string]string{ - "foo": "bar", - "bar": "baz", - } - d := NewDigester(WithIndex(i)) + i := map[string]string{ + "foo": "bar", + "bar": "baz", + } + d := NewDigester(WithIndex(i)) - iCopy := d.Index() - g.Expect(iCopy).To(Equal(i)) - g.Expect(iCopy).ToNot(BeIdenticalTo(i)) + iCopy := d.Index() + g.Expect(iCopy).To(Equal(i)) + g.Expect(iCopy).ToNot(BeIdenticalTo(i)) + }) + + t.Run("returns an empty copy for an empty index", func(t *testing.T) { + g := NewWithT(t) + d := NewDigester() + emptyIndex := d.Index() + g.Expect(emptyIndex).To(BeEmpty()) + }) } func TestDigester_Len(t *testing.T) { @@ -183,6 +214,8 @@ func TestDigester_Len(t *testing.T) { })) g.Expect(d.Len()).To(Equal(2)) + + g.Expect(NewDigester().Len()).To(Equal(0)) } func TestDigester_String(t *testing.T) { @@ -196,6 +229,8 @@ func TestDigester_String(t *testing.T) { g.Expect(d.String()).To(Equal(`bar baz foo bar `)) + + g.Expect(NewDigester().String()).To(Equal("")) } func TestDigester_WriteTo(t *testing.T) { diff --git a/internal/object/object.go b/internal/object/object.go index 105b40330..37f8ef9fe 100644 --- a/internal/object/object.go +++ b/internal/object/object.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - sourcev1 "github.com/fluxcd/source-controller/api/v1" + "github.com/fluxcd/pkg/apis/meta" ) var ( @@ -148,7 +148,7 @@ func SetSuspend(obj runtime.Object, val bool) error { } // GetArtifact returns the status.artifact of a given runtime object. -func GetArtifact(obj runtime.Object) (*sourcev1.Artifact, error) { +func GetArtifact(obj runtime.Object) (*meta.Artifact, error) { u, err := toUnstructured(obj) if err != nil { return nil, err @@ -165,7 +165,7 @@ func GetArtifact(obj runtime.Object) (*sourcev1.Artifact, error) { if err != nil { return nil, err } - outArtifact := &sourcev1.Artifact{} + outArtifact := &meta.Artifact{} if err := json.Unmarshal(enc, outArtifact); err != nil { return nil, err } diff --git a/internal/object/object_test.go b/internal/object/object_test.go index 91932d11d..35cab3303 100644 --- a/internal/object/object_test.go +++ b/internal/object/object_test.go @@ -24,6 +24,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/fluxcd/pkg/apis/meta" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) @@ -127,7 +129,7 @@ func TestGetArtifact(t *testing.T) { g.Expect(artifact).To(BeNil()) // Get set artifact value. 
- obj.Status.Artifact = &sourcev1.Artifact{Path: "aaa", Revision: "zzz"} + obj.Status.Artifact = &meta.Artifact{Path: "aaa", Revision: "zzz"} artifact, err = GetArtifact(obj) g.Expect(err).ToNot(HaveOccurred()) g.Expect(artifact).ToNot(BeNil()) diff --git a/internal/oci/auth.go b/internal/oci/auth.go index cfbc684eb..6bd35c59e 100644 --- a/internal/oci/auth.go +++ b/internal/oci/auth.go @@ -18,15 +18,14 @@ package oci import ( "context" - "fmt" - "net/url" "strings" - "github.com/fluxcd/pkg/oci/auth/login" "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + "github.com/fluxcd/pkg/auth" + authutils "github.com/fluxcd/pkg/auth/utils" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) // Anonymous is an authn.AuthConfig that always returns an anonymous @@ -41,22 +40,7 @@ func (a Anonymous) Resolve(_ authn.Resource) (authn.Authenticator, error) { } // OIDCAuth generates the OIDC credential authenticator based on the specified cloud provider. -func OIDCAuth(ctx context.Context, url, provider string, proxyURL *url.URL) (authn.Authenticator, error) { +func OIDCAuth(ctx context.Context, url, provider string, opts ...auth.Option) (authn.Authenticator, error) { u := strings.TrimPrefix(url, sourcev1.OCIRepositoryPrefix) - ref, err := name.ParseReference(u) - if err != nil { - return nil, fmt.Errorf("failed to parse URL '%s': %w", u, err) - } - - opts := login.ProviderOptions{} - switch provider { - case sourcev1.AmazonOCIProvider: - opts.AwsAutoLogin = true - case sourcev1.AzureOCIProvider: - opts.AzureAutoLogin = true - case sourcev1.GoogleOCIProvider: - opts.GcpAutoLogin = true - } - - return login.NewManager(login.WithProxyURL(proxyURL)).Login(ctx, u, ref, opts) + return authutils.GetArtifactRegistryCredentials(ctx, provider, u, opts...) } diff --git a/internal/predicates/helmrepository_type_predicate_test.go b/internal/predicates/helmrepository_type_predicate_test.go index 643e823e7..e98728413 100644 --- a/internal/predicates/helmrepository_type_predicate_test.go +++ b/internal/predicates/helmrepository_type_predicate_test.go @@ -160,7 +160,7 @@ func TestHelmRepositoryOCIMigrationPredicate_Update(t *testing.T) { Type: sourcev1.HelmRepositoryTypeDefault, } oldObj.Status = sourcev1.HelmRepositoryStatus{ - Artifact: &sourcev1.Artifact{}, + Artifact: &meta.Artifact{}, URL: "http://some-address", ObservedGeneration: 3, } diff --git a/internal/reconcile/reconcile.go b/internal/reconcile/reconcile.go index 58a160b8b..27c931168 100644 --- a/internal/reconcile/reconcile.go +++ b/internal/reconcile/reconcile.go @@ -137,7 +137,7 @@ func ComputeReconcileResult(obj conditions.Setter, res Result, recErr error, rb switch t := recErr.(type) { case *serror.Stalling: if res == ResultEmpty { - conditions.MarkStalled(obj, t.Reason, t.Error()) + conditions.MarkStalled(obj, t.Reason, "%s", t.Error()) // The current generation has been reconciled successfully and it // has resulted in a stalled state. Return no error to stop further // requeuing. 
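The conditions.MarkStalled update in the reconcile.go hunk above matters because the message parameter is a printf-style format string: passing t.Error() directly can garble any error text that happens to contain '%' characters. A minimal, self-contained sketch of the failure mode, using fmt.Sprintf to stand in for the condition message formatting (the error text below is a made-up example, not taken from the controller):

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Hypothetical error whose text contains a '%' character.
	err := errors.New("invalid chart reference: foo%bar")

	// Using the error text as the format string misinterprets "%b" as a verb
	// with a missing operand.
	fmt.Println(fmt.Sprintf(err.Error()))
	// invalid chart reference: foo%!b(MISSING)ar

	// Passing the text as an argument to an explicit "%s" verb keeps it
	// intact, mirroring the updated MarkStalled call.
	fmt.Println(fmt.Sprintf("%s", err.Error()))
	// invalid chart reference: foo%bar
}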
diff --git a/internal/reconcile/reconcile_test.go b/internal/reconcile/reconcile_test.go index 15a60b0d4..e22f370b5 100644 --- a/internal/reconcile/reconcile_test.go +++ b/internal/reconcile/reconcile_test.go @@ -29,7 +29,7 @@ import ( "github.com/fluxcd/pkg/runtime/conditions" "github.com/fluxcd/pkg/runtime/patch" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" serror "github.com/fluxcd/source-controller/internal/error" ) diff --git a/internal/reconcile/summarize/processor_test.go b/internal/reconcile/summarize/processor_test.go index dc6765d83..44f68b5bf 100644 --- a/internal/reconcile/summarize/processor_test.go +++ b/internal/reconcile/summarize/processor_test.go @@ -26,7 +26,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/fluxcd/pkg/apis/meta" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + + sourcev1 "github.com/fluxcd/source-controller/api/v1" "github.com/fluxcd/source-controller/internal/object" "github.com/fluxcd/source-controller/internal/reconcile" ) @@ -64,6 +65,43 @@ func TestRecordReconcileReq(t *testing.T) { t.Expect(obj).To(HaveStatusLastHandledReconcileAt("now")) }, }, + { + name: "empty reconcile annotation value", + beforeFunc: func(obj client.Object) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("")) + }, + }, + { + name: "whitespace-only reconcile annotation value", + beforeFunc: func(obj client.Object) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: " ", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt(" ")) + }, + }, + { + name: "reconcile annotation overwrites existing status value", + beforeFunc: func(obj client.Object) { + object.SetStatusLastHandledReconcileAt(obj, "old-value") + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "new-value", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("new-value")) + }, + }, } for _, tt := range tests { diff --git a/internal/tls/config.go b/internal/tls/config.go deleted file mode 100644 index 841c9538e..000000000 --- a/internal/tls/config.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright 2023 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package tls - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - neturl "net/url" - - corev1 "k8s.io/api/core/v1" -) - -const CACrtKey = "ca.crt" - -// TLSBytes contains the bytes of the TLS files. -type TLSBytes struct { - // CertBytes is the bytes of the certificate file. - CertBytes []byte - // KeyBytes is the bytes of the key file. 
- KeyBytes []byte - // CABytes is the bytes of the CA file. - CABytes []byte -} - -// KubeTLSClientConfigFromSecret returns a TLS client config as a `tls.Config` -// object and in its bytes representation. The secret is expected to have the -// following keys: -// - tls.key, for the private key -// - tls.crt, for the certificate -// - ca.crt, for the CA certificate -// -// Secrets with no certificate, private key, AND CA cert are ignored. If only a -// certificate OR private key is found, an error is returned. The Secret type -// can be blank, Opaque or kubernetes.io/tls. -func KubeTLSClientConfigFromSecret(secret corev1.Secret, url string) (*tls.Config, *TLSBytes, error) { - return tlsClientConfigFromSecret(secret, url, true, true) -} - -// TLSClientConfigFromSecret returns a TLS client config as a `tls.Config` -// object and in its bytes representation. The secret is expected to have the -// following keys: -// - keyFile, for the private key -// - certFile, for the certificate -// - caFile, for the CA certificate -// -// Secrets with no certificate, private key, AND CA cert are ignored. If only a -// certificate OR private key is found, an error is returned. The Secret type -// can be blank, Opaque or kubernetes.io/tls. -func TLSClientConfigFromSecret(secret corev1.Secret, url string) (*tls.Config, *TLSBytes, error) { - return tlsClientConfigFromSecret(secret, url, false, true) -} - -// LegacyTLSClientConfigFromSecret returns a TLS client config as a `tls.Config` -// object and in its bytes representation. The secret is expected to have the -// following keys: -// - keyFile, for the private key -// - certFile, for the certificate -// - caFile, for the CA certificate -// -// Secrets with no certificate, private key, AND CA cert are ignored. If only a -// certificate OR private key is found, an error is returned. -func LegacyTLSClientConfigFromSecret(secret corev1.Secret, url string) (*tls.Config, *TLSBytes, error) { - return tlsClientConfigFromSecret(secret, url, false, false) -} - -// tlsClientConfigFromSecret attempts to construct and return a TLS client -// config from the given Secret. If the Secret does not contain any TLS -// data, it returns nil. -// -// kubernetesTLSKeys is a boolean indicating whether to check the Secret -// for keys expected to be present in a Kubernetes TLS Secret. Based on its -// value, the Secret is checked for the following keys: -// - tls.key/keyFile for the private key -// - tls.crt/certFile for the certificate -// - ca.crt/caFile for the CA certificate -// The keys should adhere to a single convention, i.e. a Secret with tls.key -// and certFile is invalid. -// -// checkType is a boolean indicating whether to check the Secret type. If true -// and the Secret's type is not blank, Opaque or kubernetes.io/tls, then an -// error is returned. -func tlsClientConfigFromSecret(secret corev1.Secret, url string, kubernetesTLSKeys bool, checkType bool) (*tls.Config, *TLSBytes, error) { - if checkType { - // Only Secrets of type Opaque and TLS are allowed. We also allow Secrets with a blank - // type, to avoid having to specify the type of the Secret for every test case. - // Since a real Kubernetes Secret is of type Opaque by default, its safe to allow this. 
- switch secret.Type { - case corev1.SecretTypeOpaque, corev1.SecretTypeTLS, "": - default: - return nil, nil, fmt.Errorf("cannot use secret '%s' to construct TLS config: invalid secret type: '%s'", secret.Name, secret.Type) - } - } - - var certBytes, keyBytes, caBytes []byte - if kubernetesTLSKeys { - certBytes, keyBytes, caBytes = secret.Data[corev1.TLSCertKey], secret.Data[corev1.TLSPrivateKeyKey], secret.Data[CACrtKey] - } else { - certBytes, keyBytes, caBytes = secret.Data["certFile"], secret.Data["keyFile"], secret.Data["caFile"] - } - - switch { - case len(certBytes)+len(keyBytes)+len(caBytes) == 0: - return nil, nil, nil - case (len(certBytes) > 0 && len(keyBytes) == 0) || (len(keyBytes) > 0 && len(certBytes) == 0): - return nil, nil, fmt.Errorf("invalid '%s' secret data: both certificate and private key need to be provided", - secret.Name) - } - - tlsConf := &tls.Config{ - MinVersion: tls.VersionTLS12, - } - if len(certBytes) > 0 && len(keyBytes) > 0 { - cert, err := tls.X509KeyPair(certBytes, keyBytes) - if err != nil { - return nil, nil, err - } - tlsConf.Certificates = append(tlsConf.Certificates, cert) - } - - if len(caBytes) > 0 { - cp, err := x509.SystemCertPool() - if err != nil { - return nil, nil, fmt.Errorf("cannot retrieve system certificate pool: %w", err) - } - if !cp.AppendCertsFromPEM(caBytes) { - return nil, nil, fmt.Errorf("cannot append certificate into certificate pool: invalid CA certificate") - } - - tlsConf.RootCAs = cp - } - - if url != "" { - u, err := neturl.Parse(url) - if err != nil { - return nil, nil, fmt.Errorf("cannot parse repository URL: %w", err) - } - - tlsConf.ServerName = u.Hostname() - } - - return tlsConf, &TLSBytes{ - CertBytes: certBytes, - KeyBytes: keyBytes, - CABytes: caBytes, - }, nil -} diff --git a/internal/tls/config_test.go b/internal/tls/config_test.go deleted file mode 100644 index 949142a07..000000000 --- a/internal/tls/config_test.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Copyright 2023 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package tls - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "fmt" - "math/big" - "net/url" - "testing" - - . 
"github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" -) - -func Test_tlsClientConfigFromSecret(t *testing.T) { - kubernetesTlsSecretFixture := validTlsSecret(t, true) - tlsSecretFixture := validTlsSecret(t, false) - - tests := []struct { - name string - secret corev1.Secret - modify func(secret *corev1.Secret) - tlsKeys bool - checkType bool - url string - wantErr bool - wantNil bool - }{ - { - name: "tls.crt, tls.key and ca.crt", - secret: kubernetesTlsSecretFixture, - modify: nil, - tlsKeys: true, - url: "https://example.com", - }, - { - name: "certFile, keyFile and caFile", - secret: tlsSecretFixture, - modify: nil, - tlsKeys: false, - url: "https://example.com", - }, - { - name: "without tls.crt", - secret: kubernetesTlsSecretFixture, - modify: func(s *corev1.Secret) { delete(s.Data, "tls.crt") }, - tlsKeys: true, - wantErr: true, - wantNil: true, - }, - { - name: "without tls.key", - secret: kubernetesTlsSecretFixture, - modify: func(s *corev1.Secret) { delete(s.Data, "tls.key") }, - tlsKeys: true, - wantErr: true, - wantNil: true, - }, - { - name: "without ca.crt", - secret: kubernetesTlsSecretFixture, - modify: func(s *corev1.Secret) { delete(s.Data, "ca.crt") }, - tlsKeys: true, - }, - { - name: "empty secret", - secret: corev1.Secret{}, - tlsKeys: true, - wantNil: true, - }, - { - name: "docker config secret with type checking enabled", - secret: tlsSecretFixture, - modify: func(secret *corev1.Secret) { secret.Type = corev1.SecretTypeDockerConfigJson }, - tlsKeys: false, - checkType: true, - wantErr: true, - wantNil: true, - }, - { - name: "docker config secret with type checking disabled", - secret: tlsSecretFixture, - modify: func(secret *corev1.Secret) { secret.Type = corev1.SecretTypeDockerConfigJson }, - tlsKeys: false, - url: "https://example.com", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - secret := tt.secret.DeepCopy() - if tt.modify != nil { - tt.modify(secret) - } - - tlsConfig, _, err := tlsClientConfigFromSecret(*secret, tt.url, tt.tlsKeys, tt.checkType) - g.Expect(err != nil).To(Equal(tt.wantErr), fmt.Sprintf("expected error: %v, got: %v", tt.wantErr, err)) - g.Expect(tlsConfig == nil).To(Equal(tt.wantNil)) - if tt.url != "" { - u, _ := url.Parse(tt.url) - g.Expect(u.Hostname()).To(Equal(tlsConfig.ServerName)) - } - }) - } -} - -// validTlsSecret creates a secret containing key pair and CA certificate that are -// valid from a syntax (minimum requirements) perspective. 
-func validTlsSecret(t *testing.T, kubernetesTlsKeys bool) corev1.Secret { - t.Helper() - key, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Fatal("Private key cannot be created.", err.Error()) - } - - certTemplate := x509.Certificate{ - SerialNumber: big.NewInt(1337), - } - cert, err := x509.CreateCertificate(rand.Reader, &certTemplate, &certTemplate, &key.PublicKey, key) - if err != nil { - t.Fatal("Certificate cannot be created.", err.Error()) - } - - ca := &x509.Certificate{ - SerialNumber: big.NewInt(7331), - IsCA: true, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - } - - caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096) - if err != nil { - t.Fatal("CA private key cannot be created.", err.Error()) - } - - caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caPrivKey.PublicKey, caPrivKey) - if err != nil { - t.Fatal("CA certificate cannot be created.", err.Error()) - } - - keyPem := pem.EncodeToMemory(&pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(key), - }) - - certPem := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: cert, - }) - - caPem := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: caBytes, - }) - - crtKey := corev1.TLSCertKey - pkKey := corev1.TLSPrivateKeyKey - caKey := CACrtKey - if !kubernetesTlsKeys { - crtKey = "certFile" - pkKey = "keyFile" - caKey = "caFile" - } - return corev1.Secret{ - Data: map[string][]byte{ - crtKey: []byte(certPem), - pkKey: []byte(keyPem), - caKey: []byte(caPem), - }, - } -} diff --git a/internal/transport/transport.go b/internal/transport/transport.go deleted file mode 100644 index 89286df71..000000000 --- a/internal/transport/transport.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package transport - -import ( - "crypto/tls" - "fmt" - "net" - "net/http" - "sync" - "time" -) - -// TransportPool is a progressive and non-blocking pool -// for http.Transport objects, optimised for Gargabe Collection -// and without a hard limit on number of objects created. -// -// Its main purpose is to enable for transport objects to be -// used across helm chart download requests and helm/pkg/getter -// instances by leveraging the getter.WithTransport(t) construct. -// -// The use of this pool improves the default behaviour of helm getter -// which creates a new connection per request, or per getter instance, -// resulting on unnecessary TCP connections with the target. -// -// http.Transport objects may contain sensitive material and also have -// settings that may impact the security of HTTP operations using -// them (i.e. InsecureSkipVerify). Therefore, ensure that they are -// used in a thread-safe way, and also by reseting TLS specific state -// after each use. -// -// Calling the Release(t) function will reset TLS specific state whilst -// also releasing the transport back to the pool to be reused. 
-// -// xref: https://github.com/helm/helm/pull/10568 -// xref2: https://github.com/fluxcd/source-controller/issues/578 -type TransportPool struct { -} - -var pool = &sync.Pool{ - New: func() interface{} { - return &http.Transport{ - DisableCompression: true, - Proxy: http.ProxyFromEnvironment, - - // Due to the non blocking nature of this approach, - // at peak usage a higher number of transport objects - // may be created. sync.Pool will ensure they are - // gargage collected when/if needed. - // - // By setting a low value to IdleConnTimeout the connections - // will be closed after that period of inactivity, allowing the - // transport to be garbage collected. - IdleConnTimeout: 60 * time.Second, - - // use safe defaults based off http.DefaultTransport - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } - }, -} - -// NewOrIdle tries to return an existing transport that is not currently being used. -// If none is found, creates a new Transport instead. -// -// tlsConfig can optionally set the TLSClientConfig for the transport. -func NewOrIdle(tlsConfig *tls.Config) *http.Transport { - t := pool.Get().(*http.Transport) - t.TLSClientConfig = tlsConfig - - return t -} - -// Release releases the transport back to the TransportPool after -// sanitising its sensitive fields. -func Release(transport *http.Transport) error { - if transport == nil { - return fmt.Errorf("cannot release nil transport") - } - - transport.TLSClientConfig = nil - - pool.Put(transport) - return nil -} diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go deleted file mode 100644 index f0bc387d6..000000000 --- a/internal/transport/transport_test.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2022 The Flux authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
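The NewOrIdle and Release functions removed above were the whole surface of the pool: borrow a transport, attach per-request TLS settings, hand it to a Helm getter via the getter.WithTransport construct referenced in the doc comment, then release it so the TLS state is wiped before reuse. A rough usage sketch against the package as it existed before this deletion:

package main

import (
	"crypto/tls"
	"fmt"

	"github.com/fluxcd/source-controller/internal/transport"
)

func main() {
	// Borrow a transport from the pool (or create a new one) and attach the
	// request-specific TLS configuration.
	t := transport.NewOrIdle(&tls.Config{ServerName: "charts.example.com"})

	// ... hand t to a Helm getter via getter.WithTransport(t) and perform
	// the chart or index download ...

	// Always release the transport afterwards: Release clears the
	// TLSClientConfig so no per-request TLS state leaks into the next user.
	if err := transport.Release(t); err != nil {
		fmt.Println("release failed:", err)
	}
}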
-*/ - -package transport - -import ( - "crypto/tls" - "testing" -) - -func Test_TransportReuse(t *testing.T) { - t1 := NewOrIdle(nil) - t2 := NewOrIdle(nil) - - if t1 == t2 { - t.Errorf("same transported returned twice") - } - - err := Release(t2) - if err != nil { - t.Errorf("error releasing transport t2: %v", err) - } - - t3 := NewOrIdle(&tls.Config{ - ServerName: "testing", - }) - if t3.TLSClientConfig == nil || t3.TLSClientConfig.ServerName != "testing" { - t.Errorf("TLSClientConfig not properly configured") - } - - err = Release(t3) - if err != nil { - t.Errorf("error releasing transport t3: %v", err) - } - if t3.TLSClientConfig != nil { - t.Errorf("TLSClientConfig not cleared after release") - } - - err = Release(nil) - if err == nil { - t.Errorf("should not allow release nil transport") - } else if err.Error() != "cannot release nil transport" { - t.Errorf("wanted error message: 'cannot release nil transport' got: %q", err.Error()) - } -} diff --git a/main.go b/main.go index 5c0296c25..cb019e6e4 100644 --- a/main.go +++ b/main.go @@ -18,8 +18,6 @@ package main import ( "fmt" - "net" - "net/http" "os" "time" @@ -39,6 +37,11 @@ import ( ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + artcfg "github.com/fluxcd/pkg/artifact/config" + artdigest "github.com/fluxcd/pkg/artifact/digest" + artsrv "github.com/fluxcd/pkg/artifact/server" + artstore "github.com/fluxcd/pkg/artifact/storage" + "github.com/fluxcd/pkg/auth" pkgcache "github.com/fluxcd/pkg/cache" "github.com/fluxcd/pkg/git" "github.com/fluxcd/pkg/runtime/client" @@ -52,14 +55,12 @@ import ( "github.com/fluxcd/pkg/runtime/pprof" "github.com/fluxcd/pkg/runtime/probes" - v1 "github.com/fluxcd/source-controller/api/v1" - "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" // +kubebuilder:scaffold:imports "github.com/fluxcd/source-controller/internal/cache" "github.com/fluxcd/source-controller/internal/controller" - intdigest "github.com/fluxcd/source-controller/internal/digest" "github.com/fluxcd/source-controller/internal/features" "github.com/fluxcd/source-controller/internal/helm" "github.com/fluxcd/source-controller/internal/helm/registry" @@ -85,42 +86,37 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(v1beta2.AddToScheme(scheme)) - utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(sourcev1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } func main() { const ( - tokenCacheDefaultMaxSize = 0 + tokenCacheDefaultMaxSize = 100 ) var ( - metricsAddr string - eventsAddr string - healthAddr string - storagePath string - storageAddr string - storageAdvAddr string - concurrent int - requeueDependency time.Duration - helmIndexLimit int64 - helmChartLimit int64 - helmChartFileLimit int64 - clientOptions client.Options - logOptions logger.Options - leaderElectionOptions leaderelection.Options - rateLimiterOptions helper.RateLimiterOptions - featureGates feathelper.FeatureGates - watchOptions helper.WatchOptions - intervalJitterOptions jitter.IntervalOptions - helmCacheMaxSize int - helmCacheTTL string - helmCachePurgeInterval string - artifactRetentionTTL time.Duration - 
artifactRetentionRecords int - artifactDigestAlgo string - tokenCacheOptions pkgcache.TokenFlags + metricsAddr string + eventsAddr string + healthAddr string + concurrent int + requeueDependency time.Duration + helmIndexLimit int64 + helmChartLimit int64 + helmChartFileLimit int64 + artifactOptions artcfg.Options + clientOptions client.Options + logOptions logger.Options + leaderElectionOptions leaderelection.Options + rateLimiterOptions helper.RateLimiterOptions + featureGates feathelper.FeatureGates + watchOptions helper.WatchOptions + intervalJitterOptions jitter.IntervalOptions + helmCacheMaxSize int + helmCacheTTL string + helmCachePurgeInterval string + tokenCacheOptions pkgcache.TokenFlags + defaultServiceAccount string ) flag.StringVar(&metricsAddr, "metrics-addr", envOrDefault("METRICS_ADDR", ":8080"), @@ -128,12 +124,6 @@ func main() { flag.StringVar(&eventsAddr, "events-addr", envOrDefault("EVENTS_ADDR", ""), "The address of the events receiver.") flag.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.") - flag.StringVar(&storagePath, "storage-path", envOrDefault("STORAGE_PATH", ""), - "The local storage path.") - flag.StringVar(&storageAddr, "storage-addr", envOrDefault("STORAGE_ADDR", ":9090"), - "The address the static file server binds to.") - flag.StringVar(&storageAdvAddr, "storage-adv-addr", envOrDefault("STORAGE_ADV_ADDR", ""), - "The advertised address of the static file server.") flag.IntVar(&concurrent, "concurrent", 2, "The number of concurrent reconciles per controller.") flag.Int64Var(&helmIndexLimit, "helm-index-max-size", helm.MaxIndexSize, "The max allowed size in bytes of a Helm repository index file.") @@ -153,13 +143,10 @@ func main() { "The list of key exchange algorithms to use for ssh connections, arranged from most preferred to the least.") flag.StringSliceVar(&git.HostKeyAlgos, "ssh-hostkey-algos", []string{}, "The list of hostkey algorithms to use for ssh connections, arranged from most preferred to the least.") - flag.DurationVar(&artifactRetentionTTL, "artifact-retention-ttl", 60*time.Second, - "The duration of time that artifacts from previous reconciliations will be kept in storage before being garbage collected.") - flag.IntVar(&artifactRetentionRecords, "artifact-retention-records", 2, - "The maximum number of artifacts to be kept in storage after a garbage collection.") - flag.StringVar(&artifactDigestAlgo, "artifact-digest-algo", intdigest.Canonical.String(), - "The algorithm to use to calculate the digest of artifacts.") + flag.StringVar(&defaultServiceAccount, auth.ControllerFlagDefaultServiceAccount, + "", "Default service account to use for workload identity when not specified in resources.") + artifactOptions.BindFlags(flag.CommandLine) clientOptions.BindFlags(flag.CommandLine) logOptions.BindFlags(flag.CommandLine) leaderElectionOptions.BindFlags(flag.CommandLine) @@ -173,11 +160,28 @@ func main() { logger.SetLogger(logger.NewLogger(logOptions)) + if defaultServiceAccount != "" { + auth.SetDefaultServiceAccount(defaultServiceAccount) + } + if err := featureGates.WithLogger(setupLog).SupportedFeatures(features.FeatureGates()); err != nil { setupLog.Error(err, "unable to load feature gates") os.Exit(1) } + switch enabled, err := features.Enabled(auth.FeatureGateObjectLevelWorkloadIdentity); { + case err != nil: + setupLog.Error(err, "unable to check feature gate "+auth.FeatureGateObjectLevelWorkloadIdentity) + os.Exit(1) + case enabled: + auth.EnableObjectLevelWorkloadIdentity() + } + + if 
auth.InconsistentObjectLevelConfiguration() { + setupLog.Error(auth.ErrInconsistentObjectLevelConfiguration, "invalid configuration") + os.Exit(1) + } + if err := intervalJitterOptions.SetGlobalJitter(nil); err != nil { setupLog.Error(err, "unable to set global jitter") os.Exit(1) @@ -187,10 +191,22 @@ func main() { probes.SetupChecks(mgr, setupLog) - metrics := helper.NewMetrics(mgr, metrics.MustMakeRecorder(), v1.SourceFinalizer) + metrics := helper.NewMetrics(mgr, metrics.MustMakeRecorder(), sourcev1.SourceFinalizer) cacheRecorder := cache.MustMakeMetrics() eventRecorder := mustSetupEventRecorder(mgr, eventsAddr, controllerName) - storage := mustInitStorage(storagePath, storageAdvAddr, artifactRetentionTTL, artifactRetentionRecords, artifactDigestAlgo) + + algo, err := artdigest.AlgorithmForName(artifactOptions.ArtifactDigestAlgo) + if err != nil { + setupLog.Error(err, "unable to configure canonical digest algorithm") + os.Exit(1) + } + artdigest.Canonical = algo + + storage, err := artstore.New(&artifactOptions) + if err != nil { + setupLog.Error(err, "unable to configure artifact storage") + os.Exit(1) + } mustSetupHelmLimits(helmIndexLimit, helmChartLimit, helmChartFileLimit) helmIndexCache, helmIndexCacheItemTTL := mustInitHelmCache(helmCacheMaxSize, helmCacheTTL, helmCachePurgeInterval) @@ -216,12 +232,12 @@ func main() { Metrics: metrics, Storage: storage, ControllerName: controllerName, + TokenCache: tokenCache, }).SetupWithManagerAndOptions(mgr, controller.GitRepositoryReconcilerOptions{ DependencyRequeueInterval: requeueDependency, RateLimiter: helper.GetRateLimiter(rateLimiterOptions), - TokenCache: tokenCache, }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", v1.GitRepositoryKind) + setupLog.Error(err, "unable to create controller", "controller", sourcev1.GitRepositoryKind) os.Exit(1) } @@ -238,7 +254,7 @@ func main() { }).SetupWithManagerAndOptions(mgr, controller.HelmRepositoryReconcilerOptions{ RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", v1.HelmRepositoryKind) + setupLog.Error(err, "unable to create controller", "controller", sourcev1.HelmRepositoryKind) os.Exit(1) } @@ -256,7 +272,7 @@ func main() { }).SetupWithManagerAndOptions(ctx, mgr, controller.HelmChartReconcilerOptions{ RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", v1.HelmChartKind) + setupLog.Error(err, "unable to create controller", "controller", sourcev1.HelmChartKind) os.Exit(1) } @@ -266,10 +282,11 @@ func main() { Metrics: metrics, Storage: storage, ControllerName: controllerName, + TokenCache: tokenCache, }).SetupWithManagerAndOptions(mgr, controller.BucketReconcilerOptions{ RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", v1.BucketKind) + setupLog.Error(err, "unable to create controller", "controller", sourcev1.BucketKind) os.Exit(1) } @@ -278,11 +295,12 @@ func main() { Storage: storage, EventRecorder: eventRecorder, ControllerName: controllerName, + TokenCache: tokenCache, Metrics: metrics, }).SetupWithManagerAndOptions(mgr, controller.OCIRepositoryReconcilerOptions{ RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", v1beta2.OCIRepositoryKind) + setupLog.Error(err, "unable to create controller", 
"controller", sourcev1.OCIRepositoryKind) os.Exit(1) } // +kubebuilder:scaffold:builder @@ -293,7 +311,11 @@ func main() { // to handle that. <-mgr.Elected() - startFileServer(storage.BasePath, storageAddr) + // Start the artifact server if running as leader. + if err := artsrv.Start(ctx, &artifactOptions); err != nil { + setupLog.Error(err, "artifact server error") + os.Exit(1) + } }() setupLog.Info("starting manager") @@ -303,17 +325,6 @@ func main() { } } -func startFileServer(path string, address string) { - setupLog.Info("starting file server") - fs := http.FileServer(http.Dir(path)) - mux := http.NewServeMux() - mux.Handle("/", fs) - err := http.ListenAndServe(address, mux) - if err != nil { - setupLog.Error(err, "file server error") - } -} - func mustSetupEventRecorder(mgr ctrl.Manager, eventsAddr, controllerName string) record.EventRecorder { eventRecorder, err := events.NewRecorder(mgr, ctrl.Log, eventsAddr, controllerName) if err != nil { @@ -370,11 +381,11 @@ func mustSetupManager(metricsAddr, healthAddr string, maxConcurrent int, }, Cache: ctrlcache.Options{ ByObject: map[ctrlclient.Object]ctrlcache.ByObject{ - &v1.GitRepository{}: {Label: watchSelector}, - &v1.HelmRepository{}: {Label: watchSelector}, - &v1.HelmChart{}: {Label: watchSelector}, - &v1.Bucket{}: {Label: watchSelector}, - &v1beta2.OCIRepository{}: {Label: watchSelector}, + &sourcev1.GitRepository{}: {Label: watchSelector}, + &sourcev1.HelmRepository{}: {Label: watchSelector}, + &sourcev1.HelmChart{}: {Label: watchSelector}, + &sourcev1.Bucket{}: {Label: watchSelector}, + &sourcev1.OCIRepository{}: {Label: watchSelector}, }, }, Metrics: metricsserver.Options{ @@ -428,51 +439,6 @@ func mustInitHelmCache(maxSize int, itemTTL, purgeInterval string) (*cache.Cache return cache.New(maxSize, interval), ttl } -func mustInitStorage(path string, storageAdvAddr string, artifactRetentionTTL time.Duration, artifactRetentionRecords int, artifactDigestAlgo string) *controller.Storage { - if storageAdvAddr == "" { - storageAdvAddr = determineAdvStorageAddr(storageAdvAddr) - } - - if artifactDigestAlgo != intdigest.Canonical.String() { - algo, err := intdigest.AlgorithmForName(artifactDigestAlgo) - if err != nil { - setupLog.Error(err, "unable to configure canonical digest algorithm") - os.Exit(1) - } - intdigest.Canonical = algo - } - - storage, err := controller.NewStorage(path, storageAdvAddr, artifactRetentionTTL, artifactRetentionRecords) - if err != nil { - setupLog.Error(err, "unable to initialise storage") - os.Exit(1) - } - return storage -} - -func determineAdvStorageAddr(storageAddr string) string { - host, port, err := net.SplitHostPort(storageAddr) - if err != nil { - setupLog.Error(err, "unable to parse storage address") - os.Exit(1) - } - switch host { - case "": - host = "localhost" - case "0.0.0.0": - host = os.Getenv("HOSTNAME") - if host == "" { - hn, err := os.Hostname() - if err != nil { - setupLog.Error(err, "0.0.0.0 specified in storage addr but hostname is invalid") - os.Exit(1) - } - host = hn - } - } - return net.JoinHostPort(host, port) -} - func envOrDefault(envName, defaultValue string) string { ret := os.Getenv(envName) if ret != "" { diff --git a/tests/fuzz/Dockerfile.builder b/tests/fuzz/Dockerfile.builder index 48b15f60d..0b45115bb 100644 --- a/tests/fuzz/Dockerfile.builder +++ b/tests/fuzz/Dockerfile.builder @@ -1,9 +1,9 @@ FROM gcr.io/oss-fuzz-base/base-builder-go -RUN wget https://go.dev/dl/go1.23.0.linux-amd64.tar.gz \ +RUN wget https://go.dev/dl/go1.24.0.linux-amd64.tar.gz \ && mkdir temp-go \ 
&& rm -rf /root/.go/* \ - && tar -C temp-go/ -xzf go1.23.0.linux-amd64.tar.gz \ + && tar -C temp-go/ -xzf go1.24.0.linux-amd64.tar.gz \ && mv temp-go/go/* /root/.go/ ENV SRC=$GOPATH/src/github.com/fluxcd/source-controller diff --git a/tests/listener/listener.go b/tests/listener/listener.go index 390008d75..289b2adf0 100644 --- a/tests/listener/listener.go +++ b/tests/listener/listener.go @@ -22,7 +22,7 @@ import ( "strings" "testing" - "gotest.tools/assert" + . "github.com/onsi/gomega" ) // New creates a TCP listener on a random port and returns @@ -33,14 +33,15 @@ func New(t *testing.T) (net.Listener, string, int) { t.Helper() lis, err := net.Listen("tcp", "localhost:0") - assert.NilError(t, err) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) t.Cleanup(func() { lis.Close() }) addr := lis.Addr().String() addrParts := strings.Split(addr, ":") portStr := addrParts[len(addrParts)-1] port, err := strconv.Atoi(portStr) - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) return lis, addr, port } diff --git a/tests/registry/registry.go b/tests/registry/registry.go index 74ee117c7..28b36fd20 100644 --- a/tests/registry/registry.go +++ b/tests/registry/registry.go @@ -31,8 +31,8 @@ import ( "github.com/google/go-containerregistry/pkg/crane" gcrv1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/mutate" + . "github.com/onsi/gomega" "github.com/sirupsen/logrus" - "gotest.tools/assert" "github.com/fluxcd/pkg/oci" @@ -45,7 +45,8 @@ func New(t *testing.T) string { // Get a free random port and release it so the registry can use it. listener, addr, _ := testlistener.New(t) err := listener.Close() - assert.NilError(t, err) + g := NewWithT(t) + g.Expect(err).NotTo(HaveOccurred()) config := &configuration.Configuration{} config.HTTP.Addr = addr @@ -56,7 +57,7 @@ func New(t *testing.T) string { logrus.SetOutput(io.Discard) r, err := registry.NewRegistry(context.Background(), config) - assert.NilError(t, err) + g.Expect(err).NotTo(HaveOccurred()) go r.ListenAndServe()